mirror of https://github.com/k3s-io/k3s
e2e: modular framework
Not all users of the E2E framework want to run cloud-provider specific tests. By splitting out the code it becomes possible to decide in an E2E test suite which providers are supported. This is achieved in two ways:
- the framework calls certain functions through a provider interface instead of calling specific cloud provider functions directly
- tests that are cloud-provider specific directly import the new provider packages

The ingress test utilities are only needed by a few tests. Splitting them out into a separate package makes the framework simpler for test suites not using those tests.

Fixes: #66649
parent 8012b9583e
commit 8b17db7e0c
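The second mechanism means provider support is selected purely through imports. A minimal sketch of what a trimmed-down suite could look like, assuming the provider package layout introduced by this commit (the suite bootstrap and file name are hypothetical, not part of this change):

// e2e_suite_test.go (hypothetical): a suite that only supports the
// GCE/GKE provider plus the always-available "local" and "" providers.
package e2esuite

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	// Each provider package registers itself in init(); leaving one
	// out makes --provider=<name> fail with "provider ... is unknown".
	_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
)

func TestE2E(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Custom E2E Suite")
}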
@@ -737,7 +737,12 @@ test/e2e/autoscaling
 test/e2e/chaosmonkey
 test/e2e/common
 test/e2e/framework
+test/e2e/framework/ingress
 test/e2e/framework/metrics
+test/e2e/framework/providers/aws
+test/e2e/framework/providers/azure
+test/e2e/framework/providers/gce
+test/e2e/framework/providers/kubemark
 test/e2e/framework/timer
 test/e2e/instrumentation
 test/e2e/instrumentation/logging
@@ -34,8 +34,6 @@ import (
 	runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apiserver/pkg/util/logs"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
-	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/version"
 	commontest "k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -46,86 +44,18 @@ import (

	// ensure auth plugins are loaded
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	// ensure that cloud providers are loaded
	_ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
)

var (
	cloudConfig = &framework.TestContext.CloudConfig
)

// setupProviderConfig validates and sets up cloudConfig based on framework.TestContext.Provider.
func setupProviderConfig() error {
	switch framework.TestContext.Provider {
	case "":
		glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")

	case "gce", "gke":
		framework.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider)
		zone := framework.TestContext.CloudConfig.Zone
		region := framework.TestContext.CloudConfig.Region

		var err error
		if region == "" {
			region, err = gcecloud.GetGCERegion(zone)
			if err != nil {
				return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
			}
		}
		managedZones := []string{} // Manage all zones in the region
		if !framework.TestContext.CloudConfig.MultiZone {
			managedZones = []string{zone}
		}

		gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
			ApiEndpoint:        framework.TestContext.CloudConfig.ApiEndpoint,
			ProjectID:          framework.TestContext.CloudConfig.ProjectID,
			Region:             region,
			Zone:               zone,
			ManagedZones:       managedZones,
			NetworkName:        "", // TODO: Change this to use framework.TestContext.CloudConfig.Network?
			SubnetworkName:     "",
			NodeTags:           nil,
			NodeInstancePrefix: "",
			TokenSource:        nil,
			UseMetadataServer:  false,
			AlphaFeatureGate:   gcecloud.NewAlphaFeatureGate([]string{}),
		})

		if err != nil {
			return fmt.Errorf("Error building GCE/GKE provider: %v", err)
		}

		cloudConfig.Provider = gceCloud

		// Arbitrarily pick one of the zones we have nodes in
		if cloudConfig.Zone == "" && framework.TestContext.CloudConfig.MultiZone {
			zones, err := gceCloud.GetAllZonesFromCloudProvider()
			if err != nil {
				return err
			}

			cloudConfig.Zone, _ = zones.PopAny()
		}

	case "aws":
		if cloudConfig.Zone == "" {
			return fmt.Errorf("gce-zone must be specified for AWS")
		}
	case "azure":
		if cloudConfig.ConfigFile == "" {
			return fmt.Errorf("config-file must be specified for Azure")
		}
		config, err := os.Open(cloudConfig.ConfigFile)
		if err != nil {
			framework.Logf("Couldn't open cloud provider configuration %s: %#v",
				cloudConfig.ConfigFile, err)
		}
		defer config.Close()
		cloudConfig.Provider, err = azure.NewCloud(config)
	}

	return nil
}

// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running.
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
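For readers unfamiliar with the Ginkgo primitive referenced in the comment above, here is a minimal sketch of how SynchronizedBeforeSuite splits work between the first parallel node and all nodes (the payload contents are made up for illustration):

package e2e

import "github.com/onsi/ginkgo"

var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	// Runs once, on parallel node 1 only: do the expensive,
	// cluster-wide setup and serialize anything the other
	// nodes need to know.
	return []byte("shared-setup-result")
}, func(data []byte) {
	// Runs on every parallel node (including node 1) with the
	// payload produced above.
	_ = data
})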
@@ -137,10 +67,6 @@ func setupProviderConfig() error {
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	// Run only on Ginkgo node 1

	if err := setupProviderConfig(); err != nil {
		framework.Failf("Failed to setup provider config: %v", err)
	}

	switch framework.TestContext.Provider {
	case "gce", "gke":
		framework.LogClusterImageSources()
@@ -214,12 +140,6 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {

}, func(data []byte) {
	// Run on all Ginkgo nodes

	if cloudConfig.Provider == nil {
		if err := setupProviderConfig(); err != nil {
			framework.Failf("Failed to setup provider config: %v", err)
		}
	}
})

// Similar to SynchornizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
@@ -14,6 +14,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

+// Package framework contains provider-independent helper code for
+// building and running E2E tests with Ginkgo. The actual Ginkgo test
+// suites gets assembled by combining this framework, the optional
+// provider support code and specific tests via a separate .go file
+// like Kubernetes' test/e2e.go.
 package framework

 import (
@@ -36,17 +41,14 @@ import (
	"k8s.io/client-go/discovery"
	cacheddiscovery "k8s.io/client-go/discovery/cached"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/client-go/tools/clientcmd"
	csi "k8s.io/csi-api/pkg/client/clientset/versioned"
	aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/kubemark"
	"k8s.io/kubernetes/test/e2e/framework/metrics"
	testutils "k8s.io/kubernetes/test/utils"
@@ -108,8 +110,6 @@ type Framework struct {
 	// or stdout if ReportDir is not set once test ends.
 	TestSummaries []TestDataSummary

-	kubemarkControllerCloseChannel chan struct{}
-
 	// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
 	clusterAutoscalerMetricsBeforeTest metrics.MetricsCollection
 }
@@ -210,25 +210,7 @@ func (f *Framework) BeforeEach() {
 		resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
 		f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)

-		if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
-			externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
-			externalConfig.QPS = f.Options.ClientQPS
-			externalConfig.Burst = f.Options.ClientBurst
-			Expect(err).NotTo(HaveOccurred())
-			externalClient, err := clientset.NewForConfig(externalConfig)
-			Expect(err).NotTo(HaveOccurred())
-			f.KubemarkExternalClusterClientSet = externalClient
-			f.kubemarkControllerCloseChannel = make(chan struct{})
-			externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
-			kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
-			kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
-			go kubemarkNodeInformer.Informer().Run(f.kubemarkControllerCloseChannel)
-			TestContext.CloudConfig.KubemarkController, err = kubemark.NewKubemarkController(f.KubemarkExternalClusterClientSet, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
-			Expect(err).NotTo(HaveOccurred())
-			externalInformerFactory.Start(f.kubemarkControllerCloseChannel)
-			Expect(TestContext.CloudConfig.KubemarkController.WaitForCacheSync(f.kubemarkControllerCloseChannel)).To(BeTrue())
-			go TestContext.CloudConfig.KubemarkController.Run(f.kubemarkControllerCloseChannel)
-		}
+		TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
 	}

 	if !f.SkipNamespaceCreation {
@@ -393,9 +375,7 @@ func (f *Framework) AfterEach() {
 		}
 	}

-	if TestContext.CloudConfig.KubemarkController != nil {
-		close(f.kubemarkControllerCloseChannel)
-	}
+	TestContext.CloudConfig.Provider.FrameworkAfterEach(f)

 	// Report any flakes that were observed in the e2e test and reset.
 	if f.flakeReport != nil && f.flakeReport.GetFlakeCount() > 0 {
File diff suppressed because it is too large
@@ -0,0 +1,139 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"sync"

	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

type Factory func() (ProviderInterface, error)

var (
	providers = make(map[string]Factory)
	mutex     sync.Mutex
)

// RegisterProvider is expected to be called during application init,
// typically by an init function in a provider package.
func RegisterProvider(name string, factory Factory) {
	mutex.Lock()
	defer mutex.Unlock()
	if _, ok := providers[name]; ok {
		panic(fmt.Sprintf("provider %s already registered", name))
	}
	providers[name] = factory
}

func init() {
	// "local" can always be used.
	RegisterProvider("local", func() (ProviderInterface, error) {
		return NullProvider{}, nil
	})
	// The empty string also works, but triggers a warning.
	RegisterProvider("", func() (ProviderInterface, error) {
		Logf("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
		return NullProvider{}, nil
	})
}

// SetupProviderConfig validates the chosen provider and creates
// an interface instance for it.
func SetupProviderConfig(providerName string) (ProviderInterface, error) {
	var err error

	mutex.Lock()
	defer mutex.Unlock()
	factory, ok := providers[providerName]
	if !ok {
		return nil, fmt.Errorf("The provider %s is unknown.", providerName)
	}
	provider, err := factory()

	return provider, err
}

// ProviderInterface contains the implementation for certain
// provider-specific functionality.
type ProviderInterface interface {
	FrameworkBeforeEach(f *Framework)
	FrameworkAfterEach(f *Framework)

	ResizeGroup(group string, size int32) error
	GetGroupNodes(group string) ([]string, error)
	GroupSize(group string) (int, error)

	CreatePD(zone string) (string, error)
	DeletePD(pdName string) error
	CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error)
	DeletePVSource(pvSource *v1.PersistentVolumeSource) error

	CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string)

	EnsureLoadBalancerResourcesDeleted(ip, portRange string) error
	LoadBalancerSrcRanges() []string
	EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service))
}

// NullProvider is the default implementation of the ProviderInterface
// which doesn't do anything.
type NullProvider struct{}

func (n NullProvider) FrameworkBeforeEach(f *Framework) {}
func (n NullProvider) FrameworkAfterEach(f *Framework)  {}

func (n NullProvider) ResizeGroup(string, int32) error {
	return fmt.Errorf("Provider does not support InstanceGroups")
}
func (n NullProvider) GetGroupNodes(group string) ([]string, error) {
	return nil, fmt.Errorf("provider does not support InstanceGroups")
}
func (n NullProvider) GroupSize(group string) (int, error) {
	return -1, fmt.Errorf("provider does not support InstanceGroups")
}

func (n NullProvider) CreatePD(zone string) (string, error) {
	return "", fmt.Errorf("provider does not support volume creation")
}
func (n NullProvider) DeletePD(pdName string) error {
	return fmt.Errorf("provider does not support volume deletion")
}
func (n NullProvider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
	return nil, fmt.Errorf("Provider not supported")
}
func (n NullProvider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
	return fmt.Errorf("Provider not supported")
}

func (n NullProvider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
}

func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
	return nil
}
func (n NullProvider) LoadBalancerSrcRanges() []string {
	return nil
}
func (n NullProvider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) {
	nop := func(svc *v1.Service) {}
	return nop, nop
}

var _ ProviderInterface = NullProvider{}
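The registry above is what makes the framework extensible: a provider package only has to call RegisterProvider from an init function and embed NullProvider to inherit no-op defaults for the hooks it does not care about. A hypothetical out-of-tree provider could look like this (the name "fake" and the package are invented for illustration):

// Hypothetical provider package; "fake" is not part of this commit.
package fake

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

func init() {
	// Selected with --provider=fake once this package is imported
	// (usually via a blank import) by the test suite.
	framework.RegisterProvider("fake", newProvider)
}

func newProvider() (framework.ProviderInterface, error) {
	return &Provider{}, nil
}

// Provider overrides nothing yet; embedding NullProvider supplies
// no-op/error defaults for the entire ProviderInterface.
type Provider struct {
	framework.NullProvider
}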
@@ -0,0 +1,129 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
	"github.com/aws/aws-sdk-go/service/ec2"

	"k8s.io/api/core/v1"
	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
	"k8s.io/kubernetes/test/e2e/framework"
)

func init() {
	framework.RegisterProvider("aws", NewProvider)
}

func NewProvider() (framework.ProviderInterface, error) {
	if framework.TestContext.CloudConfig.Zone == "" {
		return nil, fmt.Errorf("gce-zone must be specified for AWS")
	}
	return &Provider{}, nil
}

type Provider struct {
	framework.NullProvider
}

func (p *Provider) ResizeGroup(group string, size int32) error {
	client := autoscaling.New(session.New())
	return awscloud.ResizeInstanceGroup(client, group, int(size))
}

func (p *Provider) GroupSize(group string) (int, error) {
	client := autoscaling.New(session.New())
	instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
	if err != nil {
		return -1, fmt.Errorf("error describing instance group: %v", err)
	}
	if instanceGroup == nil {
		return -1, fmt.Errorf("instance group not found: %s", group)
	}
	return instanceGroup.CurrentSize()
}

func (p *Provider) CreatePD(zone string) (string, error) {
	client := newAWSClient(zone)
	request := &ec2.CreateVolumeInput{}
	request.AvailabilityZone = aws.String(zone)
	request.Size = aws.Int64(10)
	request.VolumeType = aws.String(awscloud.DefaultVolumeType)
	response, err := client.CreateVolume(request)
	if err != nil {
		return "", err
	}

	az := aws.StringValue(response.AvailabilityZone)
	awsID := aws.StringValue(response.VolumeId)

	volumeName := "aws://" + az + "/" + awsID
	return volumeName, nil
}

func (p *Provider) DeletePD(pdName string) error {
	client := newAWSClient("")

	tokens := strings.Split(pdName, "/")
	awsVolumeID := tokens[len(tokens)-1]

	request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)}
	_, err := client.DeleteVolume(request)
	if err != nil {
		if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
			framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
		} else {
			return fmt.Errorf("error deleting EBS volumes: %v", err)
		}
	}
	return nil
}

func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
	return &v1.PersistentVolumeSource{
		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
			VolumeID: diskName,
			FSType:   "ext3",
		},
	}, nil
}

func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
	return framework.DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID)
}

func newAWSClient(zone string) *ec2.EC2 {
	var cfg *aws.Config

	if zone == "" {
		zone = framework.TestContext.CloudConfig.Zone
	}
	if zone == "" {
		framework.Logf("Warning: No AWS zone configured!")
		cfg = nil
	} else {
		region := zone[:len(zone)-1]
		cfg = &aws.Config{Region: aws.String(region)}
	}
	return ec2.New(session.New(), cfg)
}
@@ -0,0 +1,80 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"os"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	"k8s.io/kubernetes/test/e2e/framework"
)

func init() {
	framework.RegisterProvider("azure", NewProvider)
}

func NewProvider() (framework.ProviderInterface, error) {
	if framework.TestContext.CloudConfig.ConfigFile == "" {
		return nil, fmt.Errorf("config-file must be specified for Azure")
	}
	config, err := os.Open(framework.TestContext.CloudConfig.ConfigFile)
	if err != nil {
		framework.Logf("Couldn't open cloud provider configuration %s: %#v",
			framework.TestContext.CloudConfig.ConfigFile, err)
	}
	defer config.Close()
	azureCloud, err := azure.NewCloud(config)
	return &Provider{
		azureCloud: azureCloud.(*azure.Cloud),
	}, err
}

type Provider struct {
	framework.NullProvider

	azureCloud *azure.Cloud
}

func (p *Provider) CreatePD(zone string) (string, error) {
	pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID()))
	_, diskURI, _, err := p.azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */)
	if err != nil {
		return "", err
	}
	return diskURI, nil
}

func (p *Provider) DeletePD(pdName string) error {
	if err := p.azureCloud.DeleteVolume(pdName); err != nil {
		framework.Logf("failed to delete Azure volume %q: %v", pdName, err)
		return err
	}
	return nil
}

func (p *Provider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) {
	enable = func(svc *v1.Service) {
		svc.ObjectMeta.Annotations = map[string]string{azure.ServiceAnnotationLoadBalancerInternal: "true"}
	}
	disable = func(svc *v1.Service) {
		svc.ObjectMeta.Annotations = map[string]string{azure.ServiceAnnotationLoadBalancerInternal: "false"}
	}
	return
}
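Hooks like EnableAndDisableInternalLB let a shared test stay provider-agnostic: the test asks the current provider for the enable/disable functions and never mentions Azure or GCE annotations itself. A rough sketch of such a call site (not taken from this commit; the helper name is invented):

package e2e

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// toggleInternalLB is a hypothetical helper showing how a shared test
// could use the provider hook.
func toggleInternalLB(svc *v1.Service, makeInternal bool) {
	// The provider instance was created by SetupProviderConfig and
	// stored in the test context; each provider returns functions
	// that set (or clear) its own load-balancer annotations.
	enable, disable := framework.TestContext.CloudConfig.Provider.EnableAndDisableInternalLB()
	if makeInternal {
		enable(svc)
	} else {
		disable(svc)
	}
}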
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package framework
+package gce

 import (
 	"fmt"
@@ -23,15 +23,16 @@ import (
	"strings"
	"time"

	compute "google.golang.org/api/compute/v1"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	cloudprovider "k8s.io/cloud-provider"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/gomega"
	compute "google.golang.org/api/compute/v1"
)
@@ -51,7 +52,7 @@ func MakeFirewallNameForLBService(name string) string {
 // ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
 func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-		Failf("can not construct firewall rule for non-loadbalancer type service")
+		framework.Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
 	fw := compute.Firewall{}
 	fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc))
@@ -77,7 +78,7 @@ func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHeal
 // ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
 func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-		Failf("can not construct firewall rule for non-loadbalancer type service")
+		framework.Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
 	fw := compute.Firewall{}
 	fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck)
@@ -96,42 +97,6 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service,
 	return &fw
 }

-// GetInstanceTags gets tags from GCE instance with given name.
-func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags {
-	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
-	res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
-		instanceName).Do()
-	if err != nil {
-		Failf("Failed to get instance tags for %v: %v", instanceName, err)
-	}
-	return res.Tags
-}
-
-// SetInstanceTags sets tags on GCE instance with given name.
-func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags []string) []string {
-	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
-	// Re-get instance everytime because we need the latest fingerprint for updating metadata
-	resTags := GetInstanceTags(cloudConfig, instanceName)
-	_, err := gceCloud.ComputeServices().GA.Instances.SetTags(
-		cloudConfig.ProjectID, zone, instanceName,
-		&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
-	if err != nil {
-		Failf("failed to set instance tags: %v", err)
-	}
-	Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
-	return resTags.Items
-}
-
-// GetNodeTags gets k8s node tag from one of the nodes
-func GetNodeTags(c clientset.Interface, cloudConfig CloudConfig) []string {
-	nodes := GetReadySchedulableNodesOrDie(c)
-	if len(nodes.Items) == 0 {
-		Logf("GetNodeTags: Found 0 node.")
-		return []string{}
-	}
-	return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
-}
-
 // GetInstancePrefix returns the INSTANCE_PREFIX env we set for e2e cluster.
 // From cluster/gce/config-test.sh, master name is set up using below format:
 // MASTER_NAME="${INSTANCE_PREFIX}-master"
@@ -437,7 +402,7 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
 }

 func WaitForFirewallRule(gceCloud *gcecloud.GCECloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) {
-	Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
+	framework.Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
 	var fw *compute.Firewall
 	var err error

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package framework
+package gce

 import "testing"

@@ -0,0 +1,376 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce

import (
	"fmt"
	"net/http"
	"os/exec"
	"regexp"
	"strings"
	"time"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/test/e2e/framework"
)

func init() {
	framework.RegisterProvider("gce", factory)
	framework.RegisterProvider("gke", factory)
}

func factory() (framework.ProviderInterface, error) {
	framework.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider)
	zone := framework.TestContext.CloudConfig.Zone
	region := framework.TestContext.CloudConfig.Region

	var err error
	if region == "" {
		region, err = gcecloud.GetGCERegion(zone)
		if err != nil {
			return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
		}
	}
	managedZones := []string{} // Manage all zones in the region
	if !framework.TestContext.CloudConfig.MultiZone {
		managedZones = []string{zone}
	}

	gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
		ApiEndpoint:        framework.TestContext.CloudConfig.ApiEndpoint,
		ProjectID:          framework.TestContext.CloudConfig.ProjectID,
		Region:             region,
		Zone:               zone,
		ManagedZones:       managedZones,
		NetworkName:        "", // TODO: Change this to use framework.TestContext.CloudConfig.Network?
		SubnetworkName:     "",
		NodeTags:           nil,
		NodeInstancePrefix: "",
		TokenSource:        nil,
		UseMetadataServer:  false,
		AlphaFeatureGate:   gcecloud.NewAlphaFeatureGate([]string{}),
	})

	if err != nil {
		return nil, fmt.Errorf("Error building GCE/GKE provider: %v", err)
	}

	// Arbitrarily pick one of the zones we have nodes in
	if framework.TestContext.CloudConfig.Zone == "" && framework.TestContext.CloudConfig.MultiZone {
		zones, err := gceCloud.GetAllZonesFromCloudProvider()
		if err != nil {
			return nil, err
		}

		framework.TestContext.CloudConfig.Zone, _ = zones.PopAny()
	}

	return NewProvider(gceCloud), nil
}

func NewProvider(gceCloud *gcecloud.GCECloud) framework.ProviderInterface {
	return &Provider{
		gceCloud: gceCloud,
	}
}

type Provider struct {
	framework.NullProvider
	gceCloud *gcecloud.GCECloud
}

func (p *Provider) ResizeGroup(group string, size int32) error {
	// TODO: make this hit the compute API directly instead of shelling out to gcloud.
	// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
	zone, err := getGCEZoneForGroup(group)
	if err != nil {
		return err
	}
	output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize",
		group, fmt.Sprintf("--size=%v", size),
		"--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+zone).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to resize node instance group %s: %s", group, output)
	}
	return nil
}

func (p *Provider) GetGroupNodes(group string) ([]string, error) {
	// TODO: make this hit the compute API directly instead of shelling out to gcloud.
	// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
	zone, err := getGCEZoneForGroup(group)
	if err != nil {
		return nil, err
	}
	output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
		"list-instances", group, "--project="+framework.TestContext.CloudConfig.ProjectID,
		"--zone="+zone).CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("Failed to get nodes in instance group %s: %s", group, output)
	}
	re := regexp.MustCompile(".*RUNNING")
	lines := re.FindAllString(string(output), -1)
	for i, line := range lines {
		lines[i] = line[:strings.Index(line, " ")]
	}
	return lines, nil
}

func (p *Provider) GroupSize(group string) (int, error) {
	// TODO: make this hit the compute API directly instead of shelling out to gcloud.
	// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
	zone, err := getGCEZoneForGroup(group)
	if err != nil {
		return -1, err
	}
	output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
		"list-instances", group, "--project="+framework.TestContext.CloudConfig.ProjectID,
		"--zone="+zone).CombinedOutput()
	if err != nil {
		return -1, fmt.Errorf("Failed to get group size for group %s: %s", group, output)
	}
	re := regexp.MustCompile("RUNNING")
	return len(re.FindAllString(string(output), -1)), nil
}

func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
	project := framework.TestContext.CloudConfig.ProjectID
	region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone)
	if err != nil {
		return fmt.Errorf("could not get region for zone %q: %v", framework.TestContext.CloudConfig.Zone, err)
	}

	return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		service := p.gceCloud.ComputeServices().GA
		list, err := service.ForwardingRules.List(project, region).Do()
		if err != nil {
			return false, err
		}
		for _, item := range list.Items {
			if item.PortRange == portRange && item.IPAddress == ip {
				framework.Logf("found a load balancer: %v", item)
				return false, nil
			}
		}
		return true, nil
	})
}

func getGCEZoneForGroup(group string) (string, error) {
	zone := framework.TestContext.CloudConfig.Zone
	if framework.TestContext.CloudConfig.MultiZone {
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "list",
			"--project="+framework.TestContext.CloudConfig.ProjectID, "--format=value(zone)", "--filter=name="+group).CombinedOutput()
		if err != nil {
			return "", fmt.Errorf("Failed to get zone for node group %s: %s", group, output)
		}
		zone = strings.TrimSpace(string(output))
	}
	return zone, nil
}

func (p *Provider) CreatePD(zone string) (string, error) {
	pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID()))

	if zone == "" && framework.TestContext.CloudConfig.MultiZone {
		zones, err := p.gceCloud.GetAllZonesFromCloudProvider()
		if err != nil {
			return "", err
		}
		zone, _ = zones.PopAny()
	}

	tags := map[string]string{}
	if err := p.gceCloud.CreateDisk(pdName, gcecloud.DiskTypeStandard, zone, 2 /* sizeGb */, tags); err != nil {
		return "", err
	}
	return pdName, nil
}

func (p *Provider) DeletePD(pdName string) error {
	err := p.gceCloud.DeleteDisk(pdName)

	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
			// PD already exists, ignore error.
			return nil
		}

		framework.Logf("error deleting PD %q: %v", pdName, err)
	}
	return err
}

func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
	return &v1.PersistentVolumeSource{
		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
			PDName:   diskName,
			FSType:   "ext3",
			ReadOnly: false,
		},
	}, nil
}

func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
	return framework.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
}

// CleanupResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
	if pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
		if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
			framework.Logf("Still waiting for glbc to cleanup: %v", err)
			return false, nil
		}
		return true, nil
	}); pollErr != nil {
		framework.Failf("Failed to cleanup service GCE resources.")
	}
}

func (p *Provider) cleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) {
	if region == "" {
		// Attempt to parse region from zone if no region is given.
		var err error
		region, err = gcecloud.GetGCERegion(zone)
		if err != nil {
			return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
		}
	}
	if err := p.gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
		!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
		retErr = err
	}
	if err := p.gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil &&
		!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
		retErr = fmt.Errorf("%v\n%v", retErr, err)

	}
	if err := p.gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil &&
		!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
		retErr = fmt.Errorf("%v\n%v", retErr, err)
	}
	clusterID, err := GetClusterID(c)
	if err != nil {
		retErr = fmt.Errorf("%v\n%v", retErr, err)
		return
	}
	hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)}
	hc, getErr := p.gceCloud.GetHttpHealthCheck(loadBalancerName)
	if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) {
		retErr = fmt.Errorf("%v\n%v", retErr, getErr)
		return
	}
	if hc != nil {
		hcNames = append(hcNames, hc.Name)
	}
	if err := p.gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil &&
		!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
		retErr = fmt.Errorf("%v\n%v", retErr, err)
	}
	return
}

func (p *Provider) LoadBalancerSrcRanges() []string {
	return gcecloud.LoadBalancerSrcRanges()
}

func (p *Provider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) {
	enable = func(svc *v1.Service) {
		svc.ObjectMeta.Annotations = map[string]string{gcecloud.ServiceAnnotationLoadBalancerType: string(gcecloud.LBTypeInternal)}
	}
	disable = func(svc *v1.Service) {
		delete(svc.ObjectMeta.Annotations, gcecloud.ServiceAnnotationLoadBalancerType)
	}
	return
}

// GetInstanceTags gets tags from GCE instance with given name.
func GetInstanceTags(cloudConfig framework.CloudConfig, instanceName string) *compute.Tags {
	gceCloud := cloudConfig.Provider.(*Provider).gceCloud
	res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
		instanceName).Do()
	if err != nil {
		framework.Failf("Failed to get instance tags for %v: %v", instanceName, err)
	}
	return res.Tags
}

// SetInstanceTags sets tags on GCE instance with given name.
func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone string, tags []string) []string {
	gceCloud := cloudConfig.Provider.(*Provider).gceCloud
	// Re-get instance everytime because we need the latest fingerprint for updating metadata
	resTags := GetInstanceTags(cloudConfig, instanceName)
	_, err := gceCloud.ComputeServices().GA.Instances.SetTags(
		cloudConfig.ProjectID, zone, instanceName,
		&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
	if err != nil {
		framework.Failf("failed to set instance tags: %v", err)
	}
	framework.Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
	return resTags.Items
}

// GetNodeTags gets k8s node tag from one of the nodes
func GetNodeTags(c clientset.Interface, cloudConfig framework.CloudConfig) []string {
	nodes := framework.GetReadySchedulableNodesOrDie(c)
	if len(nodes.Items) == 0 {
		framework.Logf("GetNodeTags: Found 0 node.")
		return []string{}
	}
	return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
}

// IsHTTPErrorCode returns true if the error is a google api
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {
	apiErr, ok := err.(*googleapi.Error)
	return ok && apiErr.Code == code
}

func GetGCECloud() (*gcecloud.GCECloud, error) {
	p, ok := framework.TestContext.CloudConfig.Provider.(*Provider)
	if !ok {
		return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCE provider: %#v", framework.TestContext.CloudConfig.Provider)
	}
	return p.gceCloud, nil
}

func GetClusterID(c clientset.Interface) (string, error) {
	cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
	if err != nil || cm == nil {
		return "", fmt.Errorf("error getting cluster ID: %v", err)
	}
	clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
	providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
	if !clusterIDExists {
		return "", fmt.Errorf("cluster ID not set")
	}
	if providerIDExists {
		return providerID, nil
	}
	return clusterID, nil
}
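Provider-specific tests reach the underlying cloud object through helpers like GetGCECloud instead of type-asserting on the test context themselves. A small sketch of a GCE-only cleanup helper built on the functions above (the package, helper name, and firewall name are hypothetical):

package gcetest

import (
	"net/http"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
)

// cleanupFirewall is a hypothetical example, not part of this commit.
func cleanupFirewall(name string) {
	gceCloud, err := gce.GetGCECloud()
	if err != nil {
		framework.Failf("this test requires the GCE provider: %v", err)
	}
	// As in cleanupGCEResources above, a 404 means the resource is
	// already gone and is not treated as a failure.
	if err := gceCloud.DeleteFirewall(name); err != nil &&
		!gce.IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
		framework.Failf("failed to delete firewall %q: %v", name, err)
	}
}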
@ -0,0 +1,817 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
utilexec "k8s.io/utils/exec"
|
||||
)
|
||||
|
||||
const (
|
||||
// Name of the config-map and key the ingress controller stores its uid in.
|
||||
uidConfigMap = "ingress-uid"
|
||||
uidKey = "uid"
|
||||
|
||||
// all cloud resources created by the ingress controller start with this
|
||||
// prefix.
|
||||
k8sPrefix = "k8s-"
|
||||
|
||||
// clusterDelimiter is the delimiter used by the ingress controller
|
||||
// to split uid from other naming/metadata.
|
||||
clusterDelimiter = "--"
|
||||
|
||||
// Cloud resources created by the ingress controller older than this
|
||||
// are automatically purged to prevent running out of quota.
|
||||
// TODO(37335): write soak tests and bump this up to a week.
|
||||
maxAge = 48 * time.Hour
|
||||
|
||||
// GCE only allows names < 64 characters, and the loadbalancer controller inserts
|
||||
// a single character of padding.
|
||||
nameLenLimit = 62
|
||||
)
|
||||
|
||||
// GCEIngressController manages implementation details of Ingress on GCE/GKE.
|
||||
type GCEIngressController struct {
|
||||
Ns string
|
||||
rcPath string
|
||||
UID string
|
||||
staticIPName string
|
||||
rc *v1.ReplicationController
|
||||
svc *v1.Service
|
||||
Client clientset.Interface
|
||||
Cloud framework.CloudConfig
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) CleanupGCEIngressController() error {
|
||||
return cont.CleanupGCEIngressControllerWithTimeout(framework.LoadBalancerCleanupTimeout)
|
||||
}
|
||||
|
||||
// CleanupGCEIngressControllerWithTimeout calls the GCEIngressController.Cleanup(false)
|
||||
// followed with deleting the static ip, and then a final GCEIngressController.Cleanup(true)
|
||||
func (cont *GCEIngressController) CleanupGCEIngressControllerWithTimeout(timeout time.Duration) error {
|
||||
pollErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
|
||||
if err := cont.Cleanup(false); err != nil {
|
||||
framework.Logf("Monitoring glbc's cleanup of gce resources:\n%v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
|
||||
// Always try to cleanup even if pollErr == nil, because the cleanup
|
||||
// routine also purges old leaked resources based on creation timestamp.
|
||||
By("Performing final delete of any remaining resources")
|
||||
if cleanupErr := cont.Cleanup(true); cleanupErr != nil {
|
||||
By(fmt.Sprintf("WARNING: possibly leaked resources: %v\n", cleanupErr))
|
||||
} else {
|
||||
By("No resources leaked.")
|
||||
}
|
||||
|
||||
// Static-IP allocated on behalf of the test, never deleted by the
|
||||
// controller. Delete this IP only after the controller has had a chance
|
||||
// to cleanup or it might interfere with the controller, causing it to
|
||||
// throw out confusing events.
|
||||
if ipErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
if err := cont.deleteStaticIPs(); err != nil {
|
||||
framework.Logf("Failed to delete static-ip: %v\n", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}); ipErr != nil {
|
||||
// If this is a persistent error, the suite will fail when we run out
|
||||
// of quota anyway.
|
||||
By(fmt.Sprintf("WARNING: possibly leaked static IP: %v\n", ipErr))
|
||||
}
|
||||
|
||||
// Logging that the GLBC failed to cleanup GCE resources on ingress deletion
|
||||
// See kubernetes/ingress#431
|
||||
if pollErr != nil {
|
||||
return fmt.Errorf("error: L7 controller failed to delete all cloud resources on time. %v", pollErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) getL7AddonUID() (string, error) {
|
||||
framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
|
||||
cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if uid, ok := cm.Data[uidKey]; ok {
|
||||
return uid, nil
|
||||
}
|
||||
return "", fmt.Errorf("Could not find cluster UID for L7 addon pod")
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) ListGlobalForwardingRules() []*compute.ForwardingRule {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
fwdList := []*compute.ForwardingRule{}
|
||||
l, err := gceCloud.ListGlobalForwardingRules()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, fwd := range l {
|
||||
if cont.isOwned(fwd.Name) {
|
||||
fwdList = append(fwdList, fwd)
|
||||
}
|
||||
}
|
||||
return fwdList
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteForwardingRule(del bool) string {
|
||||
msg := ""
|
||||
fwList := []compute.ForwardingRule{}
|
||||
for _, regex := range []string{fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter), fmt.Sprintf("%vfws-.*%v.*", k8sPrefix, clusterDelimiter)} {
|
||||
gcloudComputeResourceList("forwarding-rules", regex, cont.Cloud.ProjectID, &fwList)
|
||||
if len(fwList) == 0 {
|
||||
continue
|
||||
}
|
||||
for _, f := range fwList {
|
||||
if !cont.canDelete(f.Name, f.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
GcloudComputeResourceDelete("forwarding-rules", f.Name, cont.Cloud.ProjectID, "--global")
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (forwarding rule)\n", f.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) GetGlobalAddress(ipName string) *compute.Address {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
ip, err := gceCloud.GetGlobalAddress(ipName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return ip
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteAddresses(del bool) string {
|
||||
msg := ""
|
||||
ipList := []compute.Address{}
|
||||
regex := fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter)
|
||||
gcloudComputeResourceList("addresses", regex, cont.Cloud.ProjectID, &ipList)
|
||||
if len(ipList) != 0 {
|
||||
for _, ip := range ipList {
|
||||
if !cont.canDelete(ip.Name, ip.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
GcloudComputeResourceDelete("addresses", ip.Name, cont.Cloud.ProjectID, "--global")
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (static-ip)\n", ip.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) ListTargetHttpProxies() []*compute.TargetHttpProxy {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
tpList := []*compute.TargetHttpProxy{}
|
||||
l, err := gceCloud.ListTargetHttpProxies()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, tp := range l {
|
||||
if cont.isOwned(tp.Name) {
|
||||
tpList = append(tpList, tp)
|
||||
}
|
||||
}
|
||||
return tpList
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) ListTargetHttpsProxies() []*compute.TargetHttpsProxy {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
tpsList := []*compute.TargetHttpsProxy{}
|
||||
l, err := gceCloud.ListTargetHttpsProxies()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, tps := range l {
|
||||
if cont.isOwned(tps.Name) {
|
||||
tpsList = append(tpsList, tps)
|
||||
}
|
||||
}
|
||||
return tpsList
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteTargetProxy(del bool) string {
|
||||
msg := ""
|
||||
tpList := []compute.TargetHttpProxy{}
|
||||
regex := fmt.Sprintf("%vtp-.*%v.*", k8sPrefix, clusterDelimiter)
|
||||
gcloudComputeResourceList("target-http-proxies", regex, cont.Cloud.ProjectID, &tpList)
|
||||
if len(tpList) != 0 {
|
||||
for _, t := range tpList {
|
||||
if !cont.canDelete(t.Name, t.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
GcloudComputeResourceDelete("target-http-proxies", t.Name, cont.Cloud.ProjectID)
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (target-http-proxy)\n", t.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
tpsList := []compute.TargetHttpsProxy{}
|
||||
regex = fmt.Sprintf("%vtps-.*%v.*", k8sPrefix, clusterDelimiter)
|
||||
gcloudComputeResourceList("target-https-proxies", regex, cont.Cloud.ProjectID, &tpsList)
|
||||
if len(tpsList) != 0 {
|
||||
for _, t := range tpsList {
|
||||
if !cont.canDelete(t.Name, t.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
GcloudComputeResourceDelete("target-https-proxies", t.Name, cont.Cloud.ProjectID)
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (target-https-proxy)\n", t.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) ListUrlMaps() []*compute.UrlMap {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
umList := []*compute.UrlMap{}
|
||||
l, err := gceCloud.ListUrlMaps()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, um := range l {
|
||||
if cont.isOwned(um.Name) {
|
||||
umList = append(umList, um)
|
||||
}
|
||||
}
|
||||
return umList
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteURLMap(del bool) (msg string) {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
umList, err := gceCloud.ListUrlMaps()
|
||||
if err != nil {
|
||||
if cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return msg
|
||||
}
|
||||
return fmt.Sprintf("Failed to list url maps: %v", err)
|
||||
}
|
||||
if len(umList) == 0 {
|
||||
return msg
|
||||
}
|
||||
for _, um := range umList {
|
||||
if !cont.canDelete(um.Name, um.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
framework.Logf("Deleting url-map: %s", um.Name)
|
||||
if err := gceCloud.DeleteUrlMap(um.Name); err != nil &&
|
||||
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
msg += fmt.Sprintf("Failed to delete url map %v\n", um.Name)
|
||||
}
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (url-map)\n", um.Name)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) ListGlobalBackendServices() []*compute.BackendService {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
beList := []*compute.BackendService{}
|
||||
l, err := gceCloud.ListGlobalBackendServices()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, be := range l {
|
||||
if cont.isOwned(be.Name) {
|
||||
beList = append(beList, be)
|
||||
}
|
||||
}
|
||||
return beList
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteBackendService(del bool) (msg string) {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
beList, err := gceCloud.ListGlobalBackendServices()
|
||||
if err != nil {
|
||||
if cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return msg
|
||||
}
|
||||
return fmt.Sprintf("Failed to list backend services: %v", err)
|
||||
}
|
||||
if len(beList) == 0 {
|
||||
framework.Logf("No backend services found")
|
||||
return msg
|
||||
}
|
||||
for _, be := range beList {
|
||||
if !cont.canDelete(be.Name, be.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
framework.Logf("Deleting backed-service: %s", be.Name)
|
||||
if err := gceCloud.DeleteGlobalBackendService(be.Name); err != nil &&
|
||||
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
msg += fmt.Sprintf("Failed to delete backend service %v: %v\n", be.Name, err)
|
||||
}
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (backend-service)\n", be.Name)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteHTTPHealthCheck(del bool) (msg string) {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
hcList, err := gceCloud.ListHttpHealthChecks()
|
||||
if err != nil {
|
||||
if cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return msg
|
||||
}
|
||||
return fmt.Sprintf("Failed to list HTTP health checks: %v", err)
|
||||
}
|
||||
if len(hcList) == 0 {
|
||||
return msg
|
||||
}
|
||||
for _, hc := range hcList {
|
||||
if !cont.canDelete(hc.Name, hc.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
framework.Logf("Deleting http-health-check: %s", hc.Name)
|
||||
if err := gceCloud.DeleteHttpHealthCheck(hc.Name); err != nil &&
|
||||
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
msg += fmt.Sprintf("Failed to delete HTTP health check %v\n", hc.Name)
|
||||
}
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (http-health-check)\n", hc.Name)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}

func (cont *GCEIngressController) ListSslCertificates() []*compute.SslCertificate {
    gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
    sslList := []*compute.SslCertificate{}
    l, err := gceCloud.ListSslCertificates()
    Expect(err).NotTo(HaveOccurred())
    for _, ssl := range l {
        if cont.isOwned(ssl.Name) {
            sslList = append(sslList, ssl)
        }
    }
    return sslList
}

func (cont *GCEIngressController) deleteSSLCertificate(del bool) (msg string) {
    gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
    sslList, err := gceCloud.ListSslCertificates()
    if err != nil {
        if cont.isHTTPErrorCode(err, http.StatusNotFound) {
            return msg
        }
        return fmt.Sprintf("Failed to list ssl certificates: %v", err)
    }
    if len(sslList) != 0 {
        for _, s := range sslList {
            if !cont.canDelete(s.Name, s.CreationTimestamp, del) {
                continue
            }
            if del {
                framework.Logf("Deleting ssl-certificate: %s", s.Name)
                if err := gceCloud.DeleteSslCertificate(s.Name); err != nil &&
                    !cont.isHTTPErrorCode(err, http.StatusNotFound) {
                    msg += fmt.Sprintf("Failed to delete ssl certificate %v\n", s.Name)
                }
            } else {
                msg += fmt.Sprintf("%v (ssl-certificate)\n", s.Name)
            }
        }
    }
    return msg
}

func (cont *GCEIngressController) ListInstanceGroups() []*compute.InstanceGroup {
    gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
    igList := []*compute.InstanceGroup{}
    l, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone)
    Expect(err).NotTo(HaveOccurred())
    for _, ig := range l {
        if cont.isOwned(ig.Name) {
            igList = append(igList, ig)
        }
    }
    return igList
}

func (cont *GCEIngressController) deleteInstanceGroup(del bool) (msg string) {
    gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
    // TODO: E2E cloudprovider has only 1 zone, but the cluster can have many.
    // We need to poll on all IGs across all zones.
    igList, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone)
    if err != nil {
        if cont.isHTTPErrorCode(err, http.StatusNotFound) {
            return msg
        }
        return fmt.Sprintf("Failed to list instance groups: %v", err)
    }
    if len(igList) == 0 {
        return msg
    }
    for _, ig := range igList {
        if !cont.canDelete(ig.Name, ig.CreationTimestamp, del) {
            continue
        }
        if del {
            framework.Logf("Deleting instance-group: %s", ig.Name)
            if err := gceCloud.DeleteInstanceGroup(ig.Name, cont.Cloud.Zone); err != nil &&
                !cont.isHTTPErrorCode(err, http.StatusNotFound) {
                msg += fmt.Sprintf("Failed to delete instance group %v\n", ig.Name)
            }
        } else {
            msg += fmt.Sprintf("%v (instance-group)\n", ig.Name)
        }
    }
    return msg
}

func (cont *GCEIngressController) deleteNetworkEndpointGroup(del bool) (msg string) {
    gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
    // TODO: E2E cloudprovider has only 1 zone, but the cluster can have many.
    // We need to poll on all NEGs across all zones.
    negList, err := gceCloud.ListNetworkEndpointGroup(cont.Cloud.Zone)
    if err != nil {
        if cont.isHTTPErrorCode(err, http.StatusNotFound) {
            return msg
        }
        // Do not return error as NEG is still alpha.
        framework.Logf("Failed to list network endpoint group: %v", err)
        return msg
    }
    if len(negList) == 0 {
        return msg
    }
    for _, neg := range negList {
        if !cont.canDeleteNEG(neg.Name, neg.CreationTimestamp, del) {
            continue
        }
        if del {
            framework.Logf("Deleting network-endpoint-group: %s", neg.Name)
            if err := gceCloud.DeleteNetworkEndpointGroup(neg.Name, cont.Cloud.Zone); err != nil &&
                !cont.isHTTPErrorCode(err, http.StatusNotFound) {
                msg += fmt.Sprintf("Failed to delete network endpoint group %v\n", neg.Name)
            }
        } else {
            msg += fmt.Sprintf("%v (network-endpoint-group)\n", neg.Name)
        }
    }
    return msg
}

// canDelete returns true if either the name ends in a suffix matching this
// controller's UID, or the creationTimestamp exceeds the maxAge and del is set
// to true. Always returns false if the name doesn't match the pattern we
// expect for Ingress cloud resources.
func (cont *GCEIngressController) canDelete(resourceName, creationTimestamp string, delOldResources bool) bool {
    // Ignore everything not created by an ingress controller.
    splitName := strings.Split(resourceName, clusterDelimiter)
    if !strings.HasPrefix(resourceName, k8sPrefix) || len(splitName) != 2 {
        return false
    }

    // Resources created by the GLBC have a "0" appended to the end if truncation
    // occurred. Removing the zero allows the following match.
    truncatedClusterUID := splitName[1]
    if len(truncatedClusterUID) >= 1 && strings.HasSuffix(truncatedClusterUID, "0") {
        truncatedClusterUID = truncatedClusterUID[:len(truncatedClusterUID)-1]
    }

    // Always delete things that are created by the current ingress controller.
    // Because of resource name truncation, this looks for a common prefix.
    if strings.HasPrefix(cont.UID, truncatedClusterUID) {
        return true
    }
    if !delOldResources {
        return false
    }
    return canDeleteWithTimestamp(resourceName, creationTimestamp)
}
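
The ownership test above is pure string convention. Below is a self-contained sketch of the same prefix/suffix matching; the k8sPrefix and clusterDelimiter values are assumptions mirroring the constants used elsewhere in this file, not authoritative definitions.

package main

import (
    "fmt"
    "strings"
)

// Assumed values standing in for the framework's naming constants.
const (
    k8sPrefix        = "k8s-"
    clusterDelimiter = "--"
)

// ownedByController mirrors canDelete's name checks: require the k8s prefix,
// split off the cluster-UID suffix, drop the trailing "0" that the GLBC
// appends on truncation, then prefix-match against the controller UID.
func ownedByController(resourceName, uid string) bool {
    parts := strings.Split(resourceName, clusterDelimiter)
    if !strings.HasPrefix(resourceName, k8sPrefix) || len(parts) != 2 {
        return false
    }
    suffix := parts[1]
    if strings.HasSuffix(suffix, "0") {
        suffix = suffix[:len(suffix)-1]
    }
    return strings.HasPrefix(uid, suffix)
}

func main() {
    uid := "0123456789abcdef"
    fmt.Println(ownedByController("k8s-be-30042--0123456789abcdef", uid)) // true
    fmt.Println(ownedByController("default-http-backend", uid))          // false
}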

// isOwned returns true if the resourceName ends in a suffix matching this
// controller UID.
func (cont *GCEIngressController) isOwned(resourceName string) bool {
    return cont.canDelete(resourceName, "", false)
}

// canDeleteNEG returns true if either the name contains this controller's UID,
// or the creationTimestamp exceeds the maxAge and del is set to true.
func (cont *GCEIngressController) canDeleteNEG(resourceName, creationTimestamp string, delOldResources bool) bool {
    if !strings.HasPrefix(resourceName, "k8s") {
        return false
    }

    if strings.Contains(resourceName, cont.UID) {
        return true
    }

    if !delOldResources {
        return false
    }

    return canDeleteWithTimestamp(resourceName, creationTimestamp)
}

func canDeleteWithTimestamp(resourceName, creationTimestamp string) bool {
    createdTime, err := time.Parse(time.RFC3339, creationTimestamp)
    if err != nil {
        framework.Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err)
        return false
    }
    if time.Since(createdTime) > maxAge {
        framework.Logf("%v created on %v IS too old", resourceName, creationTimestamp)
        return true
    }
    return false
}

// GetFirewallRuleName returns the name of the firewall used for the GCEIngressController.
func (cont *GCEIngressController) GetFirewallRuleName() string {
    return fmt.Sprintf("%vfw-l7%v%v", k8sPrefix, clusterDelimiter, cont.UID)
}

// GetFirewallRule returns the firewall used by the GCEIngressController.
// Causes a fatal error in case of an error.
// TODO: Rename this to GetFirewallRuleOrDie and similarly rename all other
// methods here to be consistent with the rest of the code in this repo.
func (cont *GCEIngressController) GetFirewallRule() *compute.Firewall {
    fw, err := cont.GetFirewallRuleOrError()
    Expect(err).NotTo(HaveOccurred())
    return fw
}

// GetFirewallRuleOrError returns the firewall used by the GCEIngressController.
// Returns an error if that fails.
// TODO: Rename this to GetFirewallRule when the above method with that name is renamed.
func (cont *GCEIngressController) GetFirewallRuleOrError() (*compute.Firewall, error) {
    gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
    fwName := cont.GetFirewallRuleName()
    return gceCloud.GetFirewall(fwName)
}
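
For concreteness, the firewall name produced above has the shape k8s-fw-l7--&lt;UID&gt;. A tiny runnable sketch; the prefix and delimiter values are the same assumptions as before:

package main

import "fmt"

func main() {
    k8sPrefix, clusterDelimiter := "k8s-", "--" // assumed values
    uid := "0123456789abcdef"
    fmt.Printf("%vfw-l7%v%v\n", k8sPrefix, clusterDelimiter, uid)
    // Output: k8s-fw-l7--0123456789abcdef
}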

func (cont *GCEIngressController) deleteFirewallRule(del bool) (msg string) {
    fwList := []compute.Firewall{}
    regex := fmt.Sprintf("%vfw-l7%v.*", k8sPrefix, clusterDelimiter)
    gcloudComputeResourceList("firewall-rules", regex, cont.Cloud.ProjectID, &fwList)
    if len(fwList) != 0 {
        for _, f := range fwList {
            if !cont.canDelete(f.Name, f.CreationTimestamp, del) {
                continue
            }
            if del {
                GcloudComputeResourceDelete("firewall-rules", f.Name, cont.Cloud.ProjectID)
            } else {
                msg += fmt.Sprintf("%v (firewall rule)\n", f.Name)
            }
        }
    }
    return msg
}

func (cont *GCEIngressController) isHTTPErrorCode(err error, code int) bool {
    apiErr, ok := err.(*googleapi.Error)
    return ok && apiErr.Code == code
}
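
The 404-tolerant deletes above are what make the cleanup idempotent: a delete racing the controller's own garbage collection is treated as success. A minimal sketch of the same check; googleapi.Error is the real type, the rest is illustrative:

package main

import (
    "fmt"

    "google.golang.org/api/googleapi"
)

func isHTTPErrorCode(err error, code int) bool {
    apiErr, ok := err.(*googleapi.Error)
    return ok && apiErr.Code == code
}

func main() {
    var err error = &googleapi.Error{Code: 404, Message: "not found"}
    if isHTTPErrorCode(err, 404) {
        fmt.Println("resource already gone; treat the delete as a no-op")
    }
}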

// BackendServiceUsingNEG returns true only if all global backend services with
// matching nodeports point to NEGs as their backends.
func (cont *GCEIngressController) BackendServiceUsingNEG(svcPorts map[string]v1.ServicePort) (bool, error) {
    return cont.backendMode(svcPorts, "networkEndpointGroups")
}

// BackendServiceUsingIG returns true only if all global backend services with
// matching svcPorts point to IGs as their backends.
func (cont *GCEIngressController) BackendServiceUsingIG(svcPorts map[string]v1.ServicePort) (bool, error) {
    return cont.backendMode(svcPorts, "instanceGroups")
}

func (cont *GCEIngressController) backendMode(svcPorts map[string]v1.ServicePort, keyword string) (bool, error) {
    gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
    beList, err := gceCloud.ListGlobalBackendServices()
    if err != nil {
        return false, fmt.Errorf("failed to list backend services: %v", err)
    }

    hcList, err := gceCloud.ListHealthChecks()
    if err != nil {
        return false, fmt.Errorf("failed to list health checks: %v", err)
    }

    uid := cont.UID
    if len(uid) > 8 {
        uid = uid[:8]
    }

    matchingBackendService := 0
    for svcName, sp := range svcPorts {
        match := false
        bsMatch := &compute.BackendService{}
        // Non-NEG BackendServices are named with the NodePort in the name.
        // NEG BackendServices' names contain a sha256 hash of a string.
        negString := strings.Join([]string{uid, cont.Ns, svcName, fmt.Sprintf("%v", sp.Port)}, ";")
        negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
        for _, bs := range beList {
            if strings.Contains(bs.Name, strconv.Itoa(int(sp.NodePort))) ||
                strings.Contains(bs.Name, negHash) {
                match = true
                bsMatch = bs
                matchingBackendService++
                break
            }
        }

        if match {
            for _, be := range bsMatch.Backends {
                if !strings.Contains(be.Group, keyword) {
                    return false, nil
                }
            }

            // Check that the correct HealthCheck exists for the BackendService.
            hcMatch := false
            for _, hc := range hcList {
                if hc.Name == bsMatch.Name {
                    hcMatch = true
                    break
                }
            }

            if !hcMatch {
                return false, fmt.Errorf("missing healthcheck for backendservice: %v", bsMatch.Name)
            }
        }
    }
    return matchingBackendService == len(svcPorts), nil
}
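
The NEG naming step is the subtle part: the expected backend-service name embeds the first eight hex characters of a sha256 over "uid;namespace;service;port". A self-contained sketch of just that computation, with hypothetical field values:

package main

import (
    "crypto/sha256"
    "fmt"
    "strings"
)

func main() {
    // Hypothetical inputs; the framework uses the (possibly truncated)
    // controller UID, the test namespace, the service name and its port.
    uid, ns, svc, port := "01234567", "default", "web", "80"
    negString := strings.Join([]string{uid, ns, svc, port}, ";")
    negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
    fmt.Println(negHash) // the fragment expected inside the backend-service name
}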

// Cleanup cleans up cloud resources.
// If del is false, it simply reports existing resources without deleting them.
// If del is true, it deletes resources it finds acceptable (see the canDelete func).
func (cont *GCEIngressController) Cleanup(del bool) error {
    // Ordering is important here because we cannot delete resources that other
    // resources hold references to.
    errMsg := cont.deleteForwardingRule(del)
    // Static IPs are named after forwarding rules.
    errMsg += cont.deleteAddresses(del)

    errMsg += cont.deleteTargetProxy(del)
    errMsg += cont.deleteURLMap(del)
    errMsg += cont.deleteBackendService(del)
    errMsg += cont.deleteHTTPHealthCheck(del)

    errMsg += cont.deleteInstanceGroup(del)
    errMsg += cont.deleteNetworkEndpointGroup(del)
    errMsg += cont.deleteFirewallRule(del)
    errMsg += cont.deleteSSLCertificate(del)

    // TODO: Verify instance-groups, issue #16636. Gcloud mysteriously barfs when told
    // to unmarshal instance groups into the current vendored gce-client's understanding
    // of the struct.
    if errMsg == "" {
        return nil
    }
    return fmt.Errorf(errMsg)
}
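
Because del switches the same traversal between report-only and destructive modes, Cleanup doubles as a leak detector. A hedged usage sketch, assuming an initialized controller in scope:

// Report-only pass: a non-empty error just lists resources that still exist.
if err := gceController.Cleanup(false); err != nil {
    framework.Logf("Remaining ingress resources:\n%v", err)
    // Destructive pass: delete whatever canDelete accepts.
    if err := gceController.Cleanup(true); err != nil {
        framework.Logf("Cleanup is still failing: %v", err)
    }
}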

// Init initializes the GCEIngressController with a UID.
func (cont *GCEIngressController) Init() error {
    uid, err := cont.getL7AddonUID()
    if err != nil {
        return err
    }
    cont.UID = uid
    // There's a name limit imposed by GCE. The controller will truncate.
    testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.Ns, cont.UID)
    if len(testName) > nameLenLimit {
        framework.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
    } else {
        framework.Logf("Detected cluster UID %v", cont.UID)
    }
    return nil
}

// CreateStaticIP allocates a global static ip with the given name. Returns a
// string representation of the ip. The caller is expected to manage cleanup of
// the ip by invoking deleteStaticIPs.
func (cont *GCEIngressController) CreateStaticIP(name string) string {
    gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
    addr := &compute.Address{Name: name}
    if err := gceCloud.ReserveGlobalAddress(addr); err != nil {
        if delErr := gceCloud.DeleteGlobalAddress(name); delErr != nil {
            if cont.isHTTPErrorCode(delErr, http.StatusNotFound) {
                framework.Logf("Static ip with name %v was not allocated, nothing to delete", name)
            } else {
                framework.Logf("Failed to delete static ip %v: %v", name, delErr)
            }
        }
        framework.Failf("Failed to allocate static ip %v: %v", name, err)
    }

    ip, err := gceCloud.GetGlobalAddress(name)
    if err != nil {
        framework.Failf("Failed to get newly created static ip %v: %v", name, err)
    }

    cont.staticIPName = ip.Name
    framework.Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address)
    return ip.Address
}

// deleteStaticIPs deletes all static-ips allocated through calls to
// CreateStaticIP.
func (cont *GCEIngressController) deleteStaticIPs() error {
    if cont.staticIPName != "" {
        if err := GcloudComputeResourceDelete("addresses", cont.staticIPName, cont.Cloud.ProjectID, "--global"); err == nil {
            cont.staticIPName = ""
        } else {
            return err
        }
    } else {
        e2eIPs := []compute.Address{}
        gcloudComputeResourceList("addresses", "e2e-.*", cont.Cloud.ProjectID, &e2eIPs)
        ips := []string{}
        for _, ip := range e2eIPs {
            ips = append(ips, ip.Name)
        }
        framework.Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", "))
    }
    return nil
}

// gcloudComputeResourceList unmarshals the json output of gcloud into the
// given out interface.
func gcloudComputeResourceList(resource, regex, project string, out interface{}) {
    // gcloud prints a message to stderr if it has an available update
    // so we only look at stdout.
    command := []string{
        "compute", resource, "list",
        fmt.Sprintf("--filter='name ~ \"%s\"'", regex),
        fmt.Sprintf("--project=%v", project),
        "-q", "--format=json",
    }
    output, err := exec.Command("gcloud", command...).Output()
    if err != nil {
        errCode := -1
        errMsg := ""
        if exitErr, ok := err.(utilexec.ExitError); ok {
            errCode = exitErr.ExitStatus()
            errMsg = exitErr.Error()
            if osExitErr, ok := err.(*exec.ExitError); ok {
                errMsg = fmt.Sprintf("%v, stderr %v", errMsg, string(osExitErr.Stderr))
            }
        }
        framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg)
    }
    if err := json.Unmarshal([]byte(output), out); err != nil {
        framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
    }
}
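
Call sites pass a pointer to a slice of the matching compute type and treat failures as soft, since the helper only logs. A hedged sketch in the same package; the regex and project are hypothetical:

hcs := []compute.HttpHealthCheck{}
gcloudComputeResourceList("http-health-checks", "k8s-.*", "my-gce-project", &hcs)
for _, hc := range hcs {
    framework.Logf("found health check %q", hc.Name)
}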

// GcloudComputeResourceDelete deletes the specified compute resource by name and project.
func GcloudComputeResourceDelete(resource, name, project string, args ...string) error {
    framework.Logf("Deleting %v: %v", resource, name)
    argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...)
    output, err := exec.Command("gcloud", argList...).CombinedOutput()
    if err != nil {
        framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
    }
    return err
}

// GcloudComputeResourceCreate creates a compute resource with a name and arguments.
func GcloudComputeResourceCreate(resource, name, project string, args ...string) error {
    framework.Logf("Creating %v in project %v: %v", resource, project, name)
    argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...)
    framework.Logf("Running command: gcloud %+v", strings.Join(argsList, " "))
    output, err := exec.Command("gcloud", argsList...).CombinedOutput()
    if err != nil {
        framework.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err)
    }
    return err
}
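
The create/delete wrappers pair naturally with defer for scoped resources. A hedged sketch; the address name and project are hypothetical:

if err := GcloudComputeResourceCreate("addresses", "e2e-demo-ip", "my-gce-project", "--global"); err != nil {
    framework.Failf("failed to reserve address: %v", err)
}
defer GcloudComputeResourceDelete("addresses", "e2e-demo-ip", "my-gce-project", "--global")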

@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubemark

import (
    "flag"
    "fmt"

    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/kubernetes/pkg/kubemark"
    "k8s.io/kubernetes/test/e2e/framework"

    . "github.com/onsi/gomega"
)

var (
    kubemarkExternalKubeConfig = flag.String(fmt.Sprintf("%s-%s", "kubemark-external", clientcmd.RecommendedConfigPathFlag), "", "Path to kubeconfig containing embedded authinfo for external cluster.")
)

func init() {
    framework.RegisterProvider("kubemark", NewProvider)
}

// NewProvider constructs the kubemark provider stub.
func NewProvider() (framework.ProviderInterface, error) {
    // Actual initialization happens when the e2e framework gets constructed.
    return &Provider{}, nil
}

// Provider implements the kubemark-specific parts of ProviderInterface and
// falls back to NullProvider for everything else.
type Provider struct {
    framework.NullProvider
    controller   *kubemark.KubemarkController
    closeChannel chan struct{}
}

func (p *Provider) ResizeGroup(group string, size int32) error {
    return p.controller.SetNodeGroupSize(group, int(size))
}

func (p *Provider) GetGroupNodes(group string) ([]string, error) {
    return p.controller.GetNodeNamesForNodeGroup(group)
}

func (p *Provider) FrameworkBeforeEach(f *framework.Framework) {
    if *kubemarkExternalKubeConfig != "" && p.controller == nil {
        externalConfig, err := clientcmd.BuildConfigFromFlags("", *kubemarkExternalKubeConfig)
        Expect(err).NotTo(HaveOccurred())
        externalConfig.QPS = f.Options.ClientQPS
        externalConfig.Burst = f.Options.ClientBurst
        externalClient, err := clientset.NewForConfig(externalConfig)
        Expect(err).NotTo(HaveOccurred())
        f.KubemarkExternalClusterClientSet = externalClient
        p.closeChannel = make(chan struct{})
        externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
        kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
        kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
        go kubemarkNodeInformer.Informer().Run(p.closeChannel)
        p.controller, err = kubemark.NewKubemarkController(externalClient, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
        Expect(err).NotTo(HaveOccurred())
        externalInformerFactory.Start(p.closeChannel)
        Expect(p.controller.WaitForCacheSync(p.closeChannel)).To(BeTrue())
        go p.controller.Run(p.closeChannel)
    }
}

func (p *Provider) FrameworkAfterEach(f *framework.Framework) {
    if p.closeChannel != nil {
        close(p.closeChannel)
        p.controller = nil
        p.closeChannel = nil
    }
}

func (p *Provider) GroupSize(group string) (int, error) {
    return p.controller.GetNodeGroupSize(group)
}
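
The same registration pattern generalizes to any out-of-tree provider: embed NullProvider for default no-op hooks and override only what the environment needs. A minimal sketch under that assumption; "myprovider" and the package are hypothetical:

package myprovider

import "k8s.io/kubernetes/test/e2e/framework"

func init() {
    framework.RegisterProvider("myprovider", newProvider)
}

func newProvider() (framework.ProviderInterface, error) {
    return &Provider{}, nil
}

// Provider inherits no-op implementations of every hook from NullProvider;
// a real provider would override ResizeGroup, FrameworkBeforeEach, etc.
type Provider struct {
    framework.NullProvider
}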

@ -18,16 +18,9 @@ package framework

import (
    "fmt"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/golang/glog"
    . "github.com/onsi/ginkgo"
    "google.golang.org/api/googleapi"
    "k8s.io/api/core/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"

@ -36,8 +29,6 @@ import (
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/uuid"
    clientset "k8s.io/client-go/kubernetes"
    awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
    gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
    "k8s.io/kubernetes/pkg/volume/util"
    imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -557,19 +548,6 @@ func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc
    return // note: named return value
}

// VerifyGCEDiskAttached is a sanity check for GCE testing: it verifies that the
// persistent disk is attached to the node.
func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) (bool, error) {
    gceCloud, err := GetGCECloud()
    if err != nil {
        return false, fmt.Errorf("GetGCECloud error: %v", err)
    }
    isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
    if err != nil {
        return false, fmt.Errorf("cannot verify if GCE disk is attached: %v", err)
    }
    return isAttached, nil
}

// makePvcKey returns a pvc key struct.
func makePvcKey(ns, name string) types.NamespacedName {
    return types.NamespacedName{Namespace: ns, Name: name}
@ -690,131 +668,15 @@ func DeletePDWithRetry(diskName string) error {
    return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
}

func newAWSClient(zone string) *ec2.EC2 {
    var cfg *aws.Config

    if zone == "" {
        zone = TestContext.CloudConfig.Zone
    }
    if zone == "" {
        glog.Warning("No AWS zone configured!")
        cfg = nil
    } else {
        region := zone[:len(zone)-1]
        cfg = &aws.Config{Region: aws.String(region)}
    }
    return ec2.New(session.New(), cfg)
}

func createPD(zone string) (string, error) {
    if zone == "" {
        zone = TestContext.CloudConfig.Zone
    }

    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
        pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))

        gceCloud, err := GetGCECloud()
        if err != nil {
            return "", err
        }

        if zone == "" && TestContext.CloudConfig.MultiZone {
            zones, err := gceCloud.GetAllZonesFromCloudProvider()
            if err != nil {
                return "", err
            }
            zone, _ = zones.PopAny()
        }

        tags := map[string]string{}
        err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeStandard, zone, 2 /* sizeGb */, tags)
        if err != nil {
            return "", err
        }
        return pdName, nil
    } else if TestContext.Provider == "aws" {
        client := newAWSClient(zone)
        request := &ec2.CreateVolumeInput{}
        request.AvailabilityZone = aws.String(zone)
        request.Size = aws.Int64(10)
        request.VolumeType = aws.String(awscloud.DefaultVolumeType)
        response, err := client.CreateVolume(request)
        if err != nil {
            return "", err
        }

        az := aws.StringValue(response.AvailabilityZone)
        awsID := aws.StringValue(response.VolumeId)

        volumeName := "aws://" + az + "/" + awsID
        return volumeName, nil
    } else if TestContext.Provider == "azure" {
        pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))
        azureCloud, err := GetAzureCloud()

        if err != nil {
            return "", err
        }

        _, diskURI, _, err := azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */)
        if err != nil {
            return "", err
        }
        return diskURI, nil
    } else {
        return "", fmt.Errorf("provider does not support volume creation")
    }
    return TestContext.CloudConfig.Provider.CreatePD(zone)
}
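
For AWS, the returned volume name encodes the availability zone and EBS volume id as aws://&lt;az&gt;/&lt;volume-id&gt;; deletePD below recovers the id by splitting on "/". A self-contained sketch with hypothetical values:

package main

import (
    "fmt"
    "strings"
)

func main() {
    volumeName := "aws://us-east-1a/vol-0abc123def456"
    tokens := strings.Split(volumeName, "/")
    awsVolumeID := tokens[len(tokens)-1]
    fmt.Println(awsVolumeID) // vol-0abc123def456
}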

func deletePD(pdName string) error {
    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
        gceCloud, err := GetGCECloud()
        if err != nil {
            return err
        }

        err = gceCloud.DeleteDisk(pdName)

        if err != nil {
            if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
                // The PD was already deleted; ignore the error.
                return nil
            }

            Logf("error deleting PD %q: %v", pdName, err)
        }
        return err
    } else if TestContext.Provider == "aws" {
        client := newAWSClient("")

        tokens := strings.Split(pdName, "/")
        awsVolumeID := tokens[len(tokens)-1]

        request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)}
        _, err := client.DeleteVolume(request)
        if err != nil {
            if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
                Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
            } else {
                return fmt.Errorf("error deleting EBS volumes: %v", err)
            }
        }
        return nil
    } else if TestContext.Provider == "azure" {
        azureCloud, err := GetAzureCloud()
        if err != nil {
            return err
        }
        err = azureCloud.DeleteVolume(pdName)
        if err != nil {
            Logf("failed to delete Azure volume %q: %v", pdName, err)
            return err
        }
        return nil
    } else {
        return fmt.Errorf("provider does not support volume deletion")
    }
    return TestContext.CloudConfig.Provider.DeletePD(pdName)
}

// Returns a pod definition based on the namespace. The pod references the PVC's

@ -1091,33 +953,9 @@ func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
    if err != nil {
        return nil, err
    }

    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
        return &v1.PersistentVolumeSource{
            GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                PDName:   diskName,
                FSType:   "ext3",
                ReadOnly: false,
            },
        }, nil
    } else if TestContext.Provider == "aws" {
        return &v1.PersistentVolumeSource{
            AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                VolumeID: diskName,
                FSType:   "ext3",
            },
        }, nil
    } else {
        return nil, fmt.Errorf("Provider not supported")
    }
    return TestContext.CloudConfig.Provider.CreatePVSource(zone, diskName)
}

func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
        return DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
    } else if TestContext.Provider == "aws" {
        return DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID)
    } else {
        return fmt.Errorf("Provider not supported")
    }
    return TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
}
@ -40,8 +40,6 @@ import (
    "k8s.io/client-go/util/retry"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    azurecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
    gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
    testutils "k8s.io/kubernetes/test/utils"
    imageutils "k8s.io/kubernetes/test/utils/image"

@ -1374,23 +1372,7 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI
}

func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
        CleanupServiceGCEResources(c, loadBalancerName, region, zone)
    }

    // TODO: we need to add this function with other cloud providers, if there is a need.
}

func CleanupServiceGCEResources(c clientset.Interface, loadBalancerName, region, zone string) {
    if pollErr := wait.Poll(5*time.Second, LoadBalancerCleanupTimeout, func() (bool, error) {
        if err := CleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
            Logf("Still waiting for glbc to cleanup: %v", err)
            return false, nil
        }
        return true, nil
    }); pollErr != nil {
        Failf("Failed to cleanup service GCE resources.")
    }
    TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone)
}

func DescribeSvc(ns string) {

@ -1424,29 +1406,9 @@ func CreateServiceSpec(serviceName, externalName string, isHeadless bool, select
}

// EnableAndDisableInternalLB returns two functions for enabling and disabling the internal load balancer
// setting for the supported cloud providers: GCE/GKE and Azure
// setting for the supported cloud providers (currently GCE/GKE and Azure) and empty functions for others.
func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(svc *v1.Service)) {
    enable = func(svc *v1.Service) {}
    disable = func(svc *v1.Service) {}

    switch TestContext.Provider {
    case "gce", "gke":
        enable = func(svc *v1.Service) {
            svc.ObjectMeta.Annotations = map[string]string{gcecloud.ServiceAnnotationLoadBalancerType: string(gcecloud.LBTypeInternal)}
        }
        disable = func(svc *v1.Service) {
            delete(svc.ObjectMeta.Annotations, gcecloud.ServiceAnnotationLoadBalancerType)
        }
    case "azure":
        enable = func(svc *v1.Service) {
            svc.ObjectMeta.Annotations = map[string]string{azurecloud.ServiceAnnotationLoadBalancerInternal: "true"}
        }
        disable = func(svc *v1.Service) {
            svc.ObjectMeta.Annotations = map[string]string{azurecloud.ServiceAnnotationLoadBalancerInternal: "false"}
        }
    }

    return
    return TestContext.CloudConfig.Provider.EnableAndDisableInternalLB()
}
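
The returned closures mutate a Service in place, so a test can flip the internal-LB setting without knowing which provider (if any) implements it. A hedged usage sketch:

package main

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/test/e2e/framework"
)

func main() {
    enableILB, disableILB := framework.EnableAndDisableInternalLB()
    svc := &v1.Service{}
    enableILB(svc)  // sets the provider-specific internal-LB annotation, if any
    // ... exercise the internal load balancer ...
    disableILB(svc) // clears or flips the annotation back
}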

func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration {

@ -18,14 +18,7 @@ package framework

import (
    "fmt"
    "os/exec"
    "regexp"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/autoscaling"
    awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
)

const (
@ -38,88 +31,15 @@ func ResizeGroup(group string, size int32) error {
        CoreDump(TestContext.ReportDir)
        defer CoreDump(TestContext.ReportDir)
    }
    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
        // TODO: make this hit the compute API directly instead of shelling out to gcloud.
        // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
        zone, err := getGCEZoneForGroup(group)
        if err != nil {
            return err
        }
        output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize",
            group, fmt.Sprintf("--size=%v", size),
            "--project="+TestContext.CloudConfig.ProjectID, "--zone="+zone).CombinedOutput()
        if err != nil {
            return fmt.Errorf("Failed to resize node instance group %s: %s", group, output)
        }
        return nil
    } else if TestContext.Provider == "aws" {
        client := autoscaling.New(session.New())
        return awscloud.ResizeInstanceGroup(client, group, int(size))
    } else if TestContext.Provider == "kubemark" {
        return TestContext.CloudConfig.KubemarkController.SetNodeGroupSize(group, int(size))
    } else {
        return fmt.Errorf("Provider does not support InstanceGroups")
    }
    return TestContext.CloudConfig.Provider.ResizeGroup(group, size)
}

func GetGroupNodes(group string) ([]string, error) {
    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
        // TODO: make this hit the compute API directly instead of shelling out to gcloud.
        // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
        zone, err := getGCEZoneForGroup(group)
        if err != nil {
            return nil, err
        }
        output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
            "list-instances", group, "--project="+TestContext.CloudConfig.ProjectID,
            "--zone="+zone).CombinedOutput()
        if err != nil {
            return nil, fmt.Errorf("Failed to get nodes in instance group %s: %s", group, output)
        }
        re := regexp.MustCompile(".*RUNNING")
        lines := re.FindAllString(string(output), -1)
        for i, line := range lines {
            lines[i] = line[:strings.Index(line, " ")]
        }
        return lines, nil
    } else if TestContext.Provider == "kubemark" {
        return TestContext.CloudConfig.KubemarkController.GetNodeNamesForNodeGroup(group)
    } else {
        return nil, fmt.Errorf("provider does not support InstanceGroups")
    }
    return TestContext.CloudConfig.Provider.GetGroupNodes(group)
}

func GroupSize(group string) (int, error) {
    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
        // TODO: make this hit the compute API directly instead of shelling out to gcloud.
        // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
        zone, err := getGCEZoneForGroup(group)
        if err != nil {
            return -1, err
        }
        output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
            "list-instances", group, "--project="+TestContext.CloudConfig.ProjectID,
            "--zone="+zone).CombinedOutput()
        if err != nil {
            return -1, fmt.Errorf("Failed to get group size for group %s: %s", group, output)
        }
        re := regexp.MustCompile("RUNNING")
        return len(re.FindAllString(string(output), -1)), nil
    } else if TestContext.Provider == "aws" {
        client := autoscaling.New(session.New())
        instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
        if err != nil {
            return -1, fmt.Errorf("error describing instance group: %v", err)
        }
        if instanceGroup == nil {
            return -1, fmt.Errorf("instance group not found: %s", group)
        }
        return instanceGroup.CurrentSize()
    } else if TestContext.Provider == "kubemark" {
        return TestContext.CloudConfig.KubemarkController.GetNodeGroupSize(group)
    } else {
        return -1, fmt.Errorf("provider does not support InstanceGroups")
    }
    return TestContext.CloudConfig.Provider.GroupSize(group)
}
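
With the provider indirection in place, callers stay provider-agnostic: the same scale-and-wait sequence works on GCE, AWS, or kubemark. A hedged sketch; the group name is hypothetical:

const group = "e2e-test-group" // hypothetical
if err := framework.ResizeGroup(group, 3); err != nil {
    framework.Failf("resize failed: %v", err)
}
if err := framework.WaitForGroupSize(group, 3); err != nil {
    framework.Failf("group never reached size 3: %v", err)
}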

func WaitForGroupSize(group string, size int32) error {

@ -139,16 +59,3 @@ func WaitForGroupSize(group string, size int32) error {
    }
    return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
}

func getGCEZoneForGroup(group string) (string, error) {
    zone := TestContext.CloudConfig.Zone
    if TestContext.CloudConfig.MultiZone {
        output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "list",
            "--project="+TestContext.CloudConfig.ProjectID, "--format=value(zone)", "--filter=name="+group).CombinedOutput()
        if err != nil {
            return "", fmt.Errorf("Failed to get zone for node group %s: %s", group, output)
        }
        zone = strings.TrimSpace(string(output))
    }
    return zone, nil
}

@ -29,9 +29,7 @@ import (
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    cloudprovider "k8s.io/cloud-provider"
    kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
    "k8s.io/kubernetes/pkg/kubemark"
)

const defaultHost = "http://127.0.0.1:8080"

@ -65,13 +63,12 @@ const defaultHost = "http://127.0.0.1:8080"
// Test suite authors can use framework/viper to make all command line
// parameters also configurable via a configuration file.
type TestContextType struct {
    KubeConfig                 string
    KubemarkExternalKubeConfig string
    KubeContext                string
    KubeAPIContentType         string
    KubeVolumeDir              string
    CertDir                    string
    Host                       string
    KubeConfig         string
    KubeContext        string
    KubeAPIContentType string
    KubeVolumeDir      string
    CertDir            string
    Host               string
    // TODO: Deprecating this over time... instead just use gobindata_util.go, see #23987.
    RepoRoot                string
    DockershimCheckpointDir string

@ -187,8 +184,7 @@ type CloudConfig struct {
    NodeTag   string
    MasterTag string

    Provider           cloudprovider.Interface
    KubemarkController *kubemark.KubemarkController
    Provider ProviderInterface
}

var TestContext TestContextType

@ -236,7 +232,6 @@ func RegisterCommonFlags() {
func RegisterClusterFlags() {
    flag.BoolVar(&TestContext.VerifyServiceAccount, "e2e-verify-service-account", true, "If true tests will verify the service account before running.")
    flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
    flag.StringVar(&TestContext.KubemarkExternalKubeConfig, fmt.Sprintf("%s-%s", "kubemark-external", clientcmd.RecommendedConfigPathFlag), "", "Path to kubeconfig containing embedded authinfo for external cluster.")
    flag.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
    flag.StringVar(&TestContext.KubeAPIContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType used to communicate with apiserver")

@ -367,4 +362,11 @@ func AfterReadingAllFlags(t *TestContextType) {
    if t.AllowedNotReadyNodes == 0 {
        t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100
    }

    // Make sure that all test runs have a valid TestContext.CloudConfig.Provider.
    var err error
    TestContext.CloudConfig.Provider, err = SetupProviderConfig(TestContext.Provider)
    if err != nil {
        Failf("Failed to setup provider config: %v", err)
    }
}
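
A consequence worth noting: every test run now carries a concrete ProviderInterface, so callers can go through TestContext.CloudConfig.Provider unconditionally. A hedged sketch of the manual equivalent; whether unknown names error or fall back is an assumption here:

provider, err := framework.SetupProviderConfig("gce")
if err != nil {
    framework.Failf("no provider registered under that name: %v", err)
}
framework.TestContext.CloudConfig.Provider = provider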
@ -44,7 +44,6 @@ import (
    "github.com/golang/glog"
    "golang.org/x/crypto/ssh"
    "golang.org/x/net/websocket"
    "google.golang.org/api/googleapi"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

@ -84,8 +83,6 @@ import (
    extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/client/conditions"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
    gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
    "k8s.io/kubernetes/pkg/controller"
    nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
    "k8s.io/kubernetes/pkg/controller/service"

@ -4443,48 +4440,10 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
    return string(logs), err
}

func GetGCECloud() (*gcecloud.GCECloud, error) {
    gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
    if !ok {
        return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
    }
    return gceCloud, nil
}

// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
    if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
        return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
    }
    return nil
}

func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
    gceCloud, err := GetGCECloud()
    if err != nil {
        return err
    }
    project := TestContext.CloudConfig.ProjectID
    region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
    if err != nil {
        return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
    }

    return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
        service := gceCloud.ComputeServices().GA
        list, err := service.ForwardingRules.List(project, region).Do()
        if err != nil {
            return false, err
        }
        for _, item := range list.Items {
            if item.PortRange == portRange && item.IPAddress == ip {
                Logf("found a load balancer: %v", item)
                return false, nil
            }
        }
        return true, nil
    })
    return TestContext.CloudConfig.Provider.EnsureLoadBalancerResourcesDeleted(ip, portRange)
}

// The following helper functions can block/unblock network from source

@ -4943,78 +4902,6 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
    return encounteredError
}

func GetClusterID(c clientset.Interface) (string, error) {
    cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
    if err != nil || cm == nil {
        return "", fmt.Errorf("error getting cluster ID: %v", err)
    }
    clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
    providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
    if !clusterIDExists {
        return "", fmt.Errorf("cluster ID not set")
    }
    if providerIDExists {
        return providerID, nil
    }
    return clusterID, nil
}

// CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
func CleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) {
    gceCloud, err := GetGCECloud()
    if err != nil {
        return err
    }
    if region == "" {
        // Attempt to parse region from zone if no region is given.
        region, err = gcecloud.GetGCERegion(zone)
        if err != nil {
            return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
        }
    }
    if err := gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
        !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
        retErr = err
    }
    if err := gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil &&
        !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
        retErr = fmt.Errorf("%v\n%v", retErr, err)
    }
    if err := gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil &&
        !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
        retErr = fmt.Errorf("%v\n%v", retErr, err)
    }
    clusterID, err := GetClusterID(c)
    if err != nil {
        retErr = fmt.Errorf("%v\n%v", retErr, err)
        return
    }
    hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)}
    hc, getErr := gceCloud.GetHttpHealthCheck(loadBalancerName)
    if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) {
        retErr = fmt.Errorf("%v\n%v", retErr, getErr)
        return
    }
    if hc != nil {
        hcNames = append(hcNames, hc.Name)
    }
    if err := gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil &&
        !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
        retErr = fmt.Errorf("%v\n%v", retErr, err)
    }
    return
}

// IsGoogleAPIHTTPErrorCode returns true if the error is a google api
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {
    apiErr, ok := err.(*googleapi.Error)
    return ok && apiErr.Code == code
}

// getMaster populates the externalIP, internalIP and hostname fields of the master.
// If any of these is unavailable, it is set to "".
func getMaster(c clientset.Interface) Address {

@ -5173,15 +5060,6 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err
    return err
}

// GetAzureCloud returns the azure cloud provider.
func GetAzureCloud() (*azure.Cloud, error) {
    cloud, ok := TestContext.CloudConfig.Provider.(*azure.Cloud)
    if !ok {
        return nil, fmt.Errorf("failed to convert CloudConfig.Provider to Azure: %#v", TestContext.CloudConfig.Provider)
    }
    return cloud, nil
}

func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
    now := time.Now()
    for i := range summaries {

@ -27,6 +27,7 @@ import (
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/pkg/master/ports"
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/framework/providers/gce"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

@ -42,9 +43,12 @@ var _ = SIGDescribe("Firewall rule", func() {

    BeforeEach(func() {
        framework.SkipUnlessProviderIs("gce")

        var err error
        cs = f.ClientSet
        cloudConfig = framework.TestContext.CloudConfig
        gceCloud = cloudConfig.Provider.(*gcecloud.GCECloud)
        gceCloud, err = gce.GetGCECloud()
        Expect(err).NotTo(HaveOccurred())
    })

    // This test takes around 6 minutes to run
@ -55,7 +59,7 @@ var _ = SIGDescribe("Firewall rule", func() {
        serviceName := "firewall-test-loadbalancer"

        By("Getting cluster ID")
        clusterID, err := framework.GetClusterID(cs)
        clusterID, err := gce.GetClusterID(cs)
        Expect(err).NotTo(HaveOccurred())
        framework.Logf("Got cluster ID: %v", clusterID)

@ -70,7 +74,7 @@ var _ = SIGDescribe("Firewall rule", func() {

        By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global")
        svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
            svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: framework.FirewallTestHttpPort}}
            svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: gce.FirewallTestHttpPort}}
            svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges
        })
        defer func() {

@ -80,23 +84,23 @@ var _ = SIGDescribe("Firewall rule", func() {
            })
            Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
            By("Waiting for the local traffic health check firewall rule to be deleted")
            localHCFwName := framework.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)
            _, err := framework.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
            localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)
            _, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
            Expect(err).NotTo(HaveOccurred())
        }()
        svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP

        By("Checking if service's firewall rule is correct")
        lbFw := framework.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
        lbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
        fw, err := gceCloud.GetFirewall(lbFw.Name)
        Expect(err).NotTo(HaveOccurred())
        Expect(framework.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
        Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred())

        By("Checking if service's nodes health check firewall rule is correct")
        nodesHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
        nodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
        fw, err = gceCloud.GetFirewall(nodesHCFw.Name)
        Expect(err).NotTo(HaveOccurred())
        Expect(framework.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
        Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())

        // OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE
        By("Updating LoadBalancer service to ExternalTrafficPolicy=Local")

@ -105,19 +109,19 @@ var _ = SIGDescribe("Firewall rule", func() {
        })

        By("Waiting for the nodes health check firewall rule to be deleted")
        _, err = framework.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout)
        _, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout)
        Expect(err).NotTo(HaveOccurred())

        By("Waiting for the correct local traffic health check firewall rule to be created")
        localHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
        fw, err = framework.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
        localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
        fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
        Expect(err).NotTo(HaveOccurred())
        Expect(framework.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
        Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())

        By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests))
        for i, nodeName := range nodesNames {
            podName := fmt.Sprintf("netexec%v", i)
            jig.LaunchNetexecPodOnNode(f, nodeName, podName, framework.FirewallTestHttpPort, framework.FirewallTestUdpPort, true)
            jig.LaunchNetexecPodOnNode(f, nodeName, podName, gce.FirewallTestHttpPort, gce.FirewallTestUdpPort, true)
            defer func() {
                framework.Logf("Cleaning up the netexec pod: %v", podName)
                Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred())

@ -126,7 +130,7 @@ var _ = SIGDescribe("Firewall rule", func() {

        // Send requests from outside of the cluster because internal traffic is whitelisted
        By("Accessing the external service ip from outside, all non-master nodes should be reached")
        Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
        Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())

        // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster
        // by removing the tag on one vm and make sure it doesn't get any traffic. This is an imperfect

@ -140,17 +144,17 @@ var _ = SIGDescribe("Firewall rule", func() {
        if zoneInLabel, ok := nodeList.Items[0].Labels[kubeletapis.LabelZoneFailureDomain]; ok {
            zone = zoneInLabel
        }
        removedTags := framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
        removedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
        defer func() {
            By("Adding tags back to the node and wait till the traffic is recovered")
            nodesSet.Insert(nodesNames[0])
            framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
            gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
            // Make sure traffic is recovered before exit
            Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
            Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
        }()

        By("Accessing the service through the external ip and examining that the node without tags got no response")
        Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred())
        Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred())
    })

    It("should have correct firewall rules for e2e cluster", func() {
@ -160,25 +164,25 @@ var _ = SIGDescribe("Firewall rule", func() {
        }

        By("Checking if e2e firewall rules are correct")
        for _, expFw := range framework.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
        for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
            fw, err := gceCloud.GetFirewall(expFw.Name)
            Expect(err).NotTo(HaveOccurred())
            Expect(framework.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
            Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
        }

        By("Checking well known ports on master and nodes are not exposed externally")
        nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
        Expect(len(nodeAddrs)).NotTo(BeZero())
        masterAddr := framework.GetMasterAddress(cs)
        flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, framework.FirewallTestTcpTimeout)
        flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, gce.FirewallTestTcpTimeout)
        Expect(flag).To(BeTrue())
        flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, framework.FirewallTestTcpTimeout)
        flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, gce.FirewallTestTcpTimeout)
        Expect(flag).To(BeTrue())
        flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, framework.FirewallTestTcpTimeout)
        flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, gce.FirewallTestTcpTimeout)
        Expect(flag).To(BeTrue())
        flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, framework.FirewallTestTcpTimeout)
        flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, gce.FirewallTestTcpTimeout)
        Expect(flag).To(BeTrue())
        flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, framework.FirewallTestTcpTimeout)
        flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, gce.FirewallTestTcpTimeout)
        Expect(flag).To(BeTrue())
    })
})
|
||||
|
|
|
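The firewall hunks above only re-home GCE-specific constants and helpers; the generic reachability checks stay in the framework package. A minimal sketch of the resulting call pattern, assuming the service IP and node set are prepared as in the test above:

    package example

    import (
    	"k8s.io/apimachinery/pkg/util/sets"

    	"k8s.io/kubernetes/test/e2e/framework"
    	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
    )

    // checkFirewall hits every node behind the service from outside the
    // cluster; the HTTP port and timeout constants now come from the gce
    // provider package, while the helper itself remains in framework.
    func checkFirewall(svcExternalIP string, nodesSet sets.String) error {
    	return framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet)
    }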
@@ -35,8 +35,9 @@ import (
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/apiserver/pkg/authentication/serviceaccount"
-gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/ingress"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -46,14 +47,14 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 defer GinkgoRecover()
 var (
 ns string
-jig *framework.IngressTestJig
-conformanceTests []framework.IngressConformanceTests
+jig *ingress.IngressTestJig
+conformanceTests []ingress.IngressConformanceTests
 cloudConfig framework.CloudConfig
 )
 f := framework.NewDefaultFramework("ingress")

 BeforeEach(func() {
-jig = framework.NewIngressTestJig(f.ClientSet)
+jig = ingress.NewIngressTestJig(f.ClientSet)
 ns = f.Namespace.Name
 cloudConfig = framework.TestContext.CloudConfig

@@ -76,13 +77,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 // Slow by design ~10m for each "It" block dominated by loadbalancer setup time
 // TODO: write similar tests for nginx, haproxy and AWS Ingress.
 Describe("GCE [Slow] [Feature:Ingress]", func() {
-var gceController *framework.GCEIngressController
+var gceController *gce.GCEIngressController

 // Platform specific setup
 BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce", "gke")
 By("Initializing gce controller")
-gceController = &framework.GCEIngressController{
+gceController = &gce.GCEIngressController{
 Ns: ns,
 Client: jig.Client,
 Cloud: framework.TestContext.CloudConfig,
@@ -108,7 +109,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 })

 It("should conform to Ingress spec", func() {
-conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
+conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
 for _, t := range conformanceTests {
 By(t.EntryLog)
 t.Execute()
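With the jig moved into its own package, every suite repeats the same two-step setup: build an ingress.IngressTestJig, then wrap the cloud config in a gce.GCEIngressController. A condensed sketch of that wiring, taken from the BeforeEach blocks above (controller initialization is left to the caller, as in the tests):

    package example

    import (
    	clientset "k8s.io/client-go/kubernetes"

    	"k8s.io/kubernetes/test/e2e/framework"
    	"k8s.io/kubernetes/test/e2e/framework/ingress"
    	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
    )

    // newIngressFixtures mirrors the per-test setup: the jig now comes from
    // the ingress package and the controller from the gce provider package.
    func newIngressFixtures(c clientset.Interface, ns string) (*ingress.IngressTestJig, *gce.GCEIngressController) {
    	jig := ingress.NewIngressTestJig(c)
    	return jig, &gce.GCEIngressController{
    		Ns:     ns,
    		Client: jig.Client,
    		Cloud:  framework.TestContext.CloudConfig,
    	}
    }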
@@ -128,13 +129,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 nodeTags := []string{cloudConfig.NodeTag}
 if framework.TestContext.Provider != "gce" {
 // nodeTags would be different in GKE.
-nodeTags = framework.GetNodeTags(jig.Client, cloudConfig)
+nodeTags = gce.GetNodeTags(jig.Client, cloudConfig)
 }
-expFw := jig.ConstructFirewallForIngress(gceController, nodeTags)
+expFw := jig.ConstructFirewallForIngress(gceController.GetFirewallRuleName(), nodeTags)
 // Passed the last argument as `true` to verify the backend ports is a subset
 // of the allowed ports in firewall rule, given there may be other existing
 // ingress resources and backends we are not aware of.
-Expect(framework.VerifyFirewallRule(fw, expFw, gceController.Cloud.Network, true)).NotTo(HaveOccurred())
+Expect(gce.VerifyFirewallRule(fw, expFw, gceController.Cloud.Network, true)).NotTo(HaveOccurred())

 // TODO: uncomment the restart test once we have a way to synchronize
 // and know that the controller has resumed watching. If we delete
@@ -211,7 +212,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 Expect(err).NotTo(HaveOccurred())

 By("Creating a basic HTTP ingress and wait for it to come up")
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil)
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, nil, nil)
 jig.WaitForIngress(true)

 By("Updating the path on ingress and wait for it to take effect")
@@ -239,11 +240,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {

 It("should not reconcile manually modified health check for ingress", func() {
 By("Creating a basic HTTP ingress and wait for it to come up.")
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil)
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, nil, nil)
 jig.WaitForIngress(true)

 // Get cluster UID.
-clusterID, err := framework.GetClusterID(f.ClientSet)
+clusterID, err := gce.GetClusterID(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 // Get the related nodeports.
 nodePorts := jig.GetIngressNodePorts(false)
@@ -251,7 +252,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {

 // Filter health check using cluster UID as the suffix.
 By("Retrieving relevant health check resources from GCE.")
-gceCloud := gceController.Cloud.Provider.(*gcecloud.GCECloud)
+gceCloud, err := gce.GetGCECloud()
+Expect(err).NotTo(HaveOccurred())
 hcs, err := gceCloud.ListHealthChecks()
 Expect(err).NotTo(HaveOccurred())
 var hcToChange *compute.HealthCheck
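gce.GetGCECloud replaces the direct type assertion on Cloud.Provider that the old lines used, and adds an error path instead of panicking on a bad cast. A plausible shape for such a helper is sketched below; the type name Provider and the field gceCloud are assumptions for illustration, not the actual implementation:

    package gce

    import (
    	"fmt"

    	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
    	"k8s.io/kubernetes/test/e2e/framework"
    )

    // Provider is assumed to wrap the concrete GCE cloud behind the
    // framework's provider abstraction.
    type Provider struct {
    	gceCloud *gcecloud.GCECloud
    }

    // GetGCECloud returns the GCE cloud from the shared test context,
    // failing gracefully when the configured provider is not GCE.
    func GetGCECloud() (*gcecloud.GCECloud, error) {
    	p, ok := framework.TestContext.CloudConfig.Provider.(*Provider)
    	if !ok {
    		return nil, fmt.Errorf("provider is not gce, but %T", framework.TestContext.CloudConfig.Provider)
    	}
    	return p.gceCloud, nil
    }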
@@ -314,8 +316,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {

 It("should support multiple TLS certs", func() {
 By("Creating an ingress with no certs.")
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "multiple-certs"), ns, map[string]string{
-framework.IngressStaticIPKey: ns,
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{
+ingress.IngressStaticIPKey: ns,
 }, map[string]string{})

 By("Adding multiple certs to the ingress.")
@@ -350,8 +352,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {

 It("multicluster ingress should get instance group annotation", func() {
 name := "echomap"
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, map[string]string{
-framework.IngressClassKey: framework.MulticlusterIngressClassValue,
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{
+ingress.IngressClassKey: ingress.MulticlusterIngressClassValue,
 }, map[string]string{})

 By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
@@ -359,25 +361,25 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 annotations := ing.Annotations
-if annotations == nil || annotations[framework.InstanceGroupAnnotation] == "" {
-framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", framework.InstanceGroupAnnotation, annotations)
+if annotations == nil || annotations[ingress.InstanceGroupAnnotation] == "" {
+framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", ingress.InstanceGroupAnnotation, annotations)
 return false, nil
 }
 return true, nil
 })
 if pollErr != nil {
-framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, framework.InstanceGroupAnnotation))
+framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, ingress.InstanceGroupAnnotation))
 }

 // Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc.
 // Note: All resources except the firewall rule have an annotation.
-umKey := framework.StatusPrefix + "/url-map"
-fwKey := framework.StatusPrefix + "/forwarding-rule"
-tpKey := framework.StatusPrefix + "/target-proxy"
-fwsKey := framework.StatusPrefix + "/https-forwarding-rule"
-tpsKey := framework.StatusPrefix + "/https-target-proxy"
-scKey := framework.StatusPrefix + "/ssl-cert"
-beKey := framework.StatusPrefix + "/backends"
+umKey := ingress.StatusPrefix + "/url-map"
+fwKey := ingress.StatusPrefix + "/forwarding-rule"
+tpKey := ingress.StatusPrefix + "/target-proxy"
+fwsKey := ingress.StatusPrefix + "/https-forwarding-rule"
+tpsKey := ingress.StatusPrefix + "/https-target-proxy"
+scKey := ingress.StatusPrefix + "/ssl-cert"
+beKey := ingress.StatusPrefix + "/backends"
 wait.Poll(2*time.Second, time.Minute, func() (bool, error) {
 ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
 framework.ExpectNoError(err)
@@ -423,7 +425,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 httpsScheme := "request_scheme=https"

 By("Create a basic HTTP2 ingress")
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http2"), ns, map[string]string{}, map[string]string{})
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http2"), ns, map[string]string{}, map[string]string{})
 jig.WaitForIngress(true)

 address, err := jig.WaitForIngressAddress(jig.Client, jig.Ingress.Namespace, jig.Ingress.Name, framework.LoadBalancerPollTimeout)
@@ -435,7 +437,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
 Expect(err).NotTo(HaveOccurred())
 for _, svc := range svcList.Items {
-svc.Annotations[framework.ServiceApplicationProtocolKey] = `{"http2":"HTTPS"}`
+svc.Annotations[ingress.ServiceApplicationProtocolKey] = `{"http2":"HTTPS"}`
 _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
 Expect(err).NotTo(HaveOccurred())
 }
@@ -445,7 +447,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
 Expect(err).NotTo(HaveOccurred())
 for _, svc := range svcList.Items {
-svc.Annotations[framework.ServiceApplicationProtocolKey] = `{"http2":"HTTP2"}`
+svc.Annotations[ingress.ServiceApplicationProtocolKey] = `{"http2":"HTTP2"}`
 _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
 Expect(err).NotTo(HaveOccurred())
 }
@@ -457,13 +459,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 })

 Describe("GCE [Slow] [Feature:NEG]", func() {
-var gceController *framework.GCEIngressController
+var gceController *gce.GCEIngressController

 // Platform specific setup
 BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce", "gke")
 By("Initializing gce controller")
-gceController = &framework.GCEIngressController{
+gceController = &gce.GCEIngressController{
 Ns: ns,
 Client: jig.Client,
 Cloud: framework.TestContext.CloudConfig,
@@ -490,8 +492,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {

 It("should conform to Ingress spec", func() {
 jig.PollInterval = 5 * time.Second
-conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
-framework.NEGAnnotation: `{"ingress": true}`,
+conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
+ingress.NEGAnnotation: `{"ingress": true}`,
 })
 for _, t := range conformanceTests {
 By(t.EntryLog)
@@ -507,7 +509,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 It("should be able to switch between IG and NEG modes", func() {
 var err error
 By("Create a basic HTTP ingress using NEG")
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
 jig.WaitForIngress(true)
 usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
 Expect(err).NotTo(HaveOccurred())
@@ -517,7 +519,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
 Expect(err).NotTo(HaveOccurred())
 for _, svc := range svcList.Items {
-svc.Annotations[framework.NEGAnnotation] = `{"ingress": false}`
+svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}`
 _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
 Expect(err).NotTo(HaveOccurred())
 }
@@ -530,7 +532,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
 Expect(err).NotTo(HaveOccurred())
 for _, svc := range svcList.Items {
-svc.Annotations[framework.NEGAnnotation] = `{"ingress": true}`
+svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}`
 _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
 Expect(err).NotTo(HaveOccurred())
 }
@@ -543,7 +545,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 It("should be able to create a ClusterIP service", func() {
 var err error
 By("Create a basic HTTP ingress using NEG")
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
 jig.WaitForIngress(true)
 svcPorts := jig.GetServicePorts(false)
 usingNEG, err := gceController.BackendServiceUsingNEG(svcPorts)
@@ -566,7 +568,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 _, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
 Expect(err).NotTo(HaveOccurred())
 }
-wait.Poll(10*time.Second, framework.NEGUpdateTimeout, func() (bool, error) {
+wait.Poll(10*time.Second, ingress.NEGUpdateTimeout, func() (bool, error) {
 res, err := jig.GetDistinctResponseFromIngress()
 if err != nil {
 return false, nil
@@ -576,7 +578,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 }

 By("Create a basic HTTP ingress using NEG")
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
 jig.WaitForIngress(true)
 usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
 Expect(err).NotTo(HaveOccurred())
@@ -601,7 +603,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 name := "hostname"
 replicas := 8
 By("Create a basic HTTP ingress using NEG")
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
 jig.WaitForIngress(true)
 usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
 Expect(err).NotTo(HaveOccurred())
@@ -661,15 +663,15 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 _, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
 Expect(err).NotTo(HaveOccurred())
 }
-wait.Poll(10*time.Second, framework.NEGUpdateTimeout, func() (bool, error) {
+wait.Poll(10*time.Second, ingress.NEGUpdateTimeout, func() (bool, error) {
 svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())

-var status framework.NegStatus
-v, ok := svc.Annotations[framework.NEGStatusAnnotation]
+var status ingress.NegStatus
+v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
 if !ok {
 // Wait for NEG sync loop to find NEGs
-framework.Logf("Waiting for %v, got: %+v", framework.NEGStatusAnnotation, svc.Annotations)
+framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
 return false, nil
 }
 err = json.Unmarshal([]byte(v), &status)
@@ -677,7 +679,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 framework.Logf("Error in parsing Expose NEG annotation: %v", err)
 return false, nil
 }
-framework.Logf("Got %v: %v", framework.NEGStatusAnnotation, v)
+framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)

 // Expect 2 NEGs to be created based on the test setup (neg-exposed)
 if len(status.NetworkEndpointGroups) != 2 {
@@ -695,7 +697,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 framework.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys)
 }

-gceCloud := gceController.Cloud.Provider.(*gcecloud.GCECloud)
+gceCloud, err := gce.GetGCECloud()
+Expect(err).NotTo(HaveOccurred())
 for _, neg := range status.NetworkEndpointGroups {
 networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
 Expect(err).NotTo(HaveOccurred())
@@ -710,7 +713,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 }

 By("Create a basic HTTP ingress using NEG")
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
 jig.WaitForIngress(true)
 usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
 Expect(err).NotTo(HaveOccurred())
@@ -733,16 +736,16 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 })

 Describe("GCE [Slow] [Feature:kubemci]", func() {
-var gceController *framework.GCEIngressController
+var gceController *gce.GCEIngressController
 var ipName, ipAddress string

 // Platform specific setup
 BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce", "gke")
-jig.Class = framework.MulticlusterIngressClassValue
+jig.Class = ingress.MulticlusterIngressClassValue
 jig.PollInterval = 5 * time.Second
 By("Initializing gce controller")
-gceController = &framework.GCEIngressController{
+gceController = &gce.GCEIngressController{
 Ns: ns,
 Client: jig.Client,
 Cloud: framework.TestContext.CloudConfig,
@@ -775,8 +778,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 })

 It("should conform to Ingress spec", func() {
-conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
-framework.IngressStaticIPKey: ipName,
+conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
+ingress.IngressStaticIPKey: ipName,
 })
 for _, t := range conformanceTests {
 By(t.EntryLog)
@@ -800,9 +803,9 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {

 It("should remove clusters as expected", func() {
 ingAnnotations := map[string]string{
-framework.IngressStaticIPKey: ipName,
+ingress.IngressStaticIPKey: ipName,
 }
-ingFilePath := filepath.Join(framework.IngressManifestPath, "http")
+ingFilePath := filepath.Join(ingress.IngressManifestPath, "http")
 jig.CreateIngress(ingFilePath, ns, ingAnnotations, map[string]string{})
 jig.WaitForIngress(false /*waitForNodePort*/)
 name := jig.Ingress.Name
@@ -830,7 +833,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 It("single and multi-cluster ingresses should be able to exist together", func() {
 By("Creating a single cluster ingress first")
 jig.Class = ""
-singleIngFilePath := filepath.Join(framework.IngressManifestPath, "static-ip-2")
+singleIngFilePath := filepath.Join(ingress.IngressManifestPath, "static-ip-2")
 jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{})
 jig.WaitForIngress(false /*waitForNodePort*/)
 // jig.Ingress will be overwritten when we create MCI, so keep a reference.
@@ -838,11 +841,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {

 // Create the multi-cluster ingress next.
 By("Creating a multi-cluster ingress next")
-jig.Class = framework.MulticlusterIngressClassValue
+jig.Class = ingress.MulticlusterIngressClassValue
 ingAnnotations := map[string]string{
-framework.IngressStaticIPKey: ipName,
+ingress.IngressStaticIPKey: ipName,
 }
-multiIngFilePath := filepath.Join(framework.IngressManifestPath, "http")
+multiIngFilePath := filepath.Join(ingress.IngressManifestPath, "http")
 jig.CreateIngress(multiIngFilePath, ns, ingAnnotations, map[string]string{})
 jig.WaitForIngress(false /*waitForNodePort*/)
 mciIngress := jig.Ingress
@@ -852,7 +855,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 jig.Class = ""
 jig.TryDeleteIngress()
 jig.Ingress = mciIngress
-jig.Class = framework.MulticlusterIngressClassValue
+jig.Class = ingress.MulticlusterIngressClassValue
 jig.WaitForIngress(false /*waitForNodePort*/)

 By("Cleanup: Deleting the multi-cluster ingress")
@@ -862,19 +865,19 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {

 // Time: borderline 5m, slow by design
 Describe("[Slow] Nginx", func() {
-var nginxController *framework.NginxIngressController
+var nginxController *ingress.NginxIngressController

 BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce", "gke")
 By("Initializing nginx controller")
 jig.Class = "nginx"
-nginxController = &framework.NginxIngressController{Ns: ns, Client: jig.Client}
+nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client}

 // TODO: This test may fail on other platforms. We can simply skip it
 // but we want to allow easy testing where a user might've hand
 // configured firewalls.
 if framework.ProviderIs("gce", "gke") {
-framework.ExpectNoError(framework.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
+framework.ExpectNoError(gce.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
 } else {
 framework.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
 }
@@ -884,7 +887,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {

 AfterEach(func() {
 if framework.ProviderIs("gce", "gke") {
-framework.ExpectNoError(framework.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
+framework.ExpectNoError(gce.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
 }
 if CurrentGinkgoTestDescription().Failed {
 framework.DescribeIng(ns)
@@ -901,7 +904,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 // Poll more frequently to reduce e2e completion time.
 // This test runs in presubmit.
 jig.PollInterval = 5 * time.Second
-conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
+conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
 for _, t := range conformanceTests {
 By(t.EntryLog)
 t.Execute()
@@ -923,13 +926,13 @@ func verifyKubemciStatusHas(name, expectedSubStr string) {
 }
 }

-func executePresharedCertTest(f *framework.Framework, jig *framework.IngressTestJig, staticIPName string) {
+func executePresharedCertTest(f *framework.Framework, jig *ingress.IngressTestJig, staticIPName string) {
 preSharedCertName := "test-pre-shared-cert"
 By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
 testHostname := "test.ingress.com"
-cert, key, err := framework.GenerateRSACerts(testHostname, true)
+cert, key, err := ingress.GenerateRSACerts(testHostname, true)
 Expect(err).NotTo(HaveOccurred())
-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 Expect(err).NotTo(HaveOccurred())
 defer func() {
 // We would not be able to delete the cert until ingress controller
@@ -959,36 +962,36 @@ func executePresharedCertTest(f *framework.Framework, jig *framework.IngressTest
 By("Creating an ingress referencing the pre-shared certificate")
 // Create an ingress referencing this cert using pre-shared-cert annotation.
 ingAnnotations := map[string]string{
-framework.IngressPreSharedCertKey: preSharedCertName,
+ingress.IngressPreSharedCertKey: preSharedCertName,
 // Disallow HTTP to save resources. This is irrelevant to the
 // pre-shared cert test.
-framework.IngressAllowHTTPKey: "false",
+ingress.IngressAllowHTTPKey: "false",
 }
 if staticIPName != "" {
-ingAnnotations[framework.IngressStaticIPKey] = staticIPName
+ingAnnotations[ingress.IngressStaticIPKey] = staticIPName
 }
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})

 By("Test that ingress works with the pre-shared certificate")
 err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
 Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
 }

-func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *framework.IngressTestJig, ipName, ip string) {
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{
-framework.IngressStaticIPKey: ipName,
-framework.IngressAllowHTTPKey: "false",
+func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.IngressTestJig, ipName, ip string) {
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{
+ingress.IngressStaticIPKey: ipName,
+ingress.IngressAllowHTTPKey: "false",
 }, map[string]string{})

 By("waiting for Ingress to come up with ip: " + ip)
-httpClient := framework.BuildInsecureClient(framework.IngressReqTimeout)
+httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout)
 framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))

 By("should reject HTTP traffic")
 framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
 }

-func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.IngressTestJig, staticIPName string) {
+func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.IngressTestJig, staticIPName string) {
 By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
 deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName)
 defer func() {
@@ -1004,7 +1007,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.Ing
 Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP")

 By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
-timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout}
+timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout}
 err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
 resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
 if err != nil {
@@ -1020,8 +1023,8 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.Ing
 Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress")
 }

-func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *framework.IngressTestJig, address, version, scheme string) {
-timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout}
+func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *ingress.IngressTestJig, address, version, scheme string) {
+timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout}
 resp := ""
 err := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
 resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", address), "")
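All three helpers now take *ingress.IngressTestJig, so call sites construct the jig from the ingress package before delegating. An illustrative caller, assuming it lives in the same package as the helpers above:

    // runStaticIPTest is a hypothetical wrapper showing the updated
    // signature in use; ipName and ip come from the surrounding test.
    func runStaticIPTest(f *framework.Framework, ipName, ip string) {
    	jig := ingress.NewIngressTestJig(f.ClientSet)
    	executeStaticIPHttpsOnlyTest(f, jig, ipName, ip)
    }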
@@ -30,6 +30,7 @@ import (
 gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -53,7 +54,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
 }
 for _, lb := range serviceLBNames {
 framework.Logf("cleaning gce resource for %s", lb)
-framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
+framework.TestContext.CloudConfig.Provider.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
 }
 //reset serviceLBNames
 serviceLBNames = []string{}
@@ -102,7 +103,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
 // Test 3: create a standard-tierd LB with a user-requested IP.
 By("reserving a static IP for the load balancer")
 requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunId)
-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 Expect(err).NotTo(HaveOccurred())
 requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard)
 Expect(err).NotTo(HaveOccurred(), "failed to reserve a STANDARD tiered address")
@@ -187,7 +188,7 @@ func getLBNetworkTierByIP(ip string) (cloud.NetworkTier, error) {
 }

 func getGCEForwardingRuleByIP(ip string) (*computealpha.ForwardingRule, error) {
-cloud, err := framework.GetGCECloud()
+cloud, err := gce.GetGCECloud()
 if err != nil {
 return nil, err
 }
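The CleanupServiceResources hunk is the clearest instance of the commit's provider-interface approach: instead of calling a GCE-specific cleanup function, the test dispatches through whatever provider the suite configured. A minimal sketch of the interface this implies; the real framework interface likely carries more methods and may differ in detail:

    // Assumed shape, for illustration only.
    type ProviderInterface interface {
    	// CleanupServiceResources removes cloud resources (forwarding rules,
    	// target pools, health checks, ...) left behind by a LoadBalancer
    	// service with the given name.
    	CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string)
    }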
@@ -29,6 +29,8 @@ import (
 clientset "k8s.io/client-go/kubernetes"

 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/ingress"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 )

 const (
@@ -59,10 +61,10 @@ var (
 // IngressScaleFramework defines the framework for ingress scale testing.
 type IngressScaleFramework struct {
 Clientset clientset.Interface
-Jig *framework.IngressTestJig
-GCEController *framework.GCEIngressController
+Jig *ingress.IngressTestJig
+GCEController *gce.GCEIngressController
 CloudConfig framework.CloudConfig
-Logger framework.TestLogger
+Logger ingress.TestLogger

 Namespace string
 EnableTLS bool
@@ -92,7 +94,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra
 Namespace: ns,
 Clientset: cs,
 CloudConfig: cloudConfig,
-Logger: &framework.E2ELogger{},
+Logger: &ingress.E2ELogger{},
 EnableTLS: true,
 NumIngressesTest: []int{
 numIngressesSmall,
@@ -106,10 +108,10 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra
 // PrepareScaleTest prepares framework for ingress scale testing.
 func (f *IngressScaleFramework) PrepareScaleTest() error {
 f.Logger.Infof("Initializing ingress test suite and gce controller...")
-f.Jig = framework.NewIngressTestJig(f.Clientset)
+f.Jig = ingress.NewIngressTestJig(f.Clientset)
 f.Jig.Logger = f.Logger
 f.Jig.PollInterval = scaleTestPollInterval
-f.GCEController = &framework.GCEIngressController{
+f.GCEController = &gce.GCEIngressController{
 Client: f.Clientset,
 Cloud: f.CloudConfig,
 }
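A scale run then wires the relocated types together through the exported fields, as in this condensed usage sketch based on the constructor and PrepareScaleTest above (the run and cleanup steps are elided):

    func runScaleTest(cs clientset.Interface, ns string, cloudConfig framework.CloudConfig) error {
    	f := NewIngressScaleFramework(cs, ns, cloudConfig)
    	f.Logger = &ingress.E2ELogger{} // already the default; shown for clarity
    	if err := f.PrepareScaleTest(); err != nil {
    		return err
    	}
    	// ... create ingresses, measure, clean up ...
    	return nil
    }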
@@ -33,6 +33,8 @@ import (
 gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/ingress"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 "k8s.io/kubernetes/test/e2e/network/scale"
 )

@@ -117,7 +119,7 @@ func main() {
 glog.Errorf("Error building GCE provider: %v", err)
 os.Exit(1)
 }
-cloudConfig.Provider = gceCloud
+cloudConfig.Provider = gce.NewProvider(gceCloud)

 testSuccessFlag := true
 defer func() {
@@ -150,7 +152,7 @@ func main() {

 // Setting up a localized scale test framework.
 f := scale.NewIngressScaleFramework(cs, ns.Name, cloudConfig)
-f.Logger = &framework.GLogger{}
+f.Logger = &ingress.GLogger{}
 // Customizing scale test.
 f.EnableTLS = enableTLS
 f.OutputFile = outputFile
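gce.NewProvider is the inverse of the GetGCECloud sketch earlier: it wraps the concrete cloud in the provider type that CloudConfig.Provider now expects, so the raw GCECloud is no longer assigned directly. A plausible one-liner, reusing the same assumed names as before:

    // Hypothetical sketch; the actual constructor may differ.
    func NewProvider(gceCloud *gcecloud.GCECloud) framework.ProviderInterface {
    	return &Provider{gceCloud: gceCloud}
    }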
@@ -37,6 +37,7 @@ import (
 "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 "k8s.io/kubernetes/pkg/controller/endpoint"
 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 imageutils "k8s.io/kubernetes/test/utils/image"

 . "github.com/onsi/ginkgo"
@@ -589,7 +590,7 @@ var _ = SIGDescribe("Services", func() {
 if framework.ProviderIs("gce", "gke") {
 By("creating a static load balancer IP")
 staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunId)
-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 Expect(err).NotTo(HaveOccurred())

 err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
@@ -646,7 +647,7 @@ var _ = SIGDescribe("Services", func() {
 // coming from, so this is first-aid rather than surgery).
 By("demoting the static IP to ephemeral")
 if staticIPName != "" {
-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 Expect(err).NotTo(HaveOccurred())
 // Deleting it after it is attached "demotes" it to an
 // ephemeral IP, which can be auto-released.
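The reserve-then-demote pattern in this test deserves a note: the address is reserved up front, and deleting it while the load balancer still holds it merely demotes it to an ephemeral IP that GCE releases along with the LB. A condensed sketch; the DeleteRegionAddress call is an assumption made to mirror ReserveRegionAddress:

    func withReservedIP(staticIPName string) error {
    	gceCloud, err := gce.GetGCECloud()
    	if err != nil {
    		return err
    	}
    	if err := gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region()); err != nil {
    		return err
    	}
    	// Deleting after the LB is attached "demotes" the address to an
    	// ephemeral IP, which can then be auto-released.
    	defer gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region())
    	// ... create the LoadBalancer service using staticIPName ...
    	return nil
    }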
@@ -30,6 +30,7 @@ import (
 "k8s.io/apimachinery/pkg/util/uuid"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 )

 var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
@@ -59,7 +60,7 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {

 // OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes
 func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 Expect(err).NotTo(HaveOccurred())

 // Get all the zones that the nodes are in
@@ -39,6 +39,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -385,7 +386,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {

 if disruptOp == deleteNode {
 By("getting gce instances")
-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err))
 output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
 framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
@@ -476,7 +477,7 @@ func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName

 func detachPD(nodeName types.NodeName, pdName string) error {
 if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 if err != nil {
 return err
 }
@@ -580,7 +581,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
 func waitForPDDetach(diskName string, nodeName types.NodeName) error {
 if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
 framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 if err != nil {
 return err
 }
@@ -26,12 +26,13 @@ import (
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 )

 // verifyGCEDiskAttached performs a sanity check to verify the PD attached to the node
 func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 Expect(err).NotTo(HaveOccurred())
 isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
 Expect(err).NotTo(HaveOccurred())
@@ -37,6 +37,7 @@ import (
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 "k8s.io/kubernetes/pkg/volume/util"
 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -210,7 +211,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
 waitStatus <- waitForStatefulSetReplicasNotReady(statefulSet.Name, ns, c)
 }()

-cloud, err := framework.GetGCECloud()
+cloud, err := gce.GetGCECloud()
 if err != nil {
 Expect(err).NotTo(HaveOccurred())
 }
@@ -46,6 +46,7 @@ import (
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 volumeutil "k8s.io/kubernetes/pkg/volume/util"
 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -332,7 +333,7 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
 }

 func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
-cloud, err := framework.GetGCECloud()
+cloud, err := gce.GetGCECloud()
 if err != nil {
 return err
 }
@@ -609,7 +610,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 allZones := sets.NewString() // all zones in the project
 managedZones := sets.NewString() // subset of allZones

-gceCloud, err := framework.GetGCECloud()
+gceCloud, err := gce.GetGCECloud()
 Expect(err).NotTo(HaveOccurred())

 // Get all k8s managed zones (same as zones with nodes in them for test)
@@ -31,6 +31,8 @@ import (
 "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/ingress"
+"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 )

 // Dependent on "static-ip-2" manifests
@@ -39,10 +41,10 @@ const host = "ingress.test.com"

 // IngressUpgradeTest adapts the Ingress e2e for upgrade testing
 type IngressUpgradeTest struct {
-gceController *framework.GCEIngressController
+gceController *gce.GCEIngressController
 // holds GCP resources pre-upgrade
 resourceStore *GCPResourceStore
-jig *framework.IngressTestJig
+jig *ingress.IngressTestJig
 httpClient *http.Client
 ip string
 ipName string
@@ -73,12 +75,12 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
 framework.SkipUnlessProviderIs("gce", "gke")

 // jig handles all Kubernetes testing logic
-jig := framework.NewIngressTestJig(f.ClientSet)
+jig := ingress.NewIngressTestJig(f.ClientSet)

 ns := f.Namespace

 // gceController handles all cloud testing logic
-gceController := &framework.GCEIngressController{
+gceController := &gce.GCEIngressController{
 Ns: ns.Name,
 Client: jig.Client,
 Cloud: framework.TestContext.CloudConfig,
@@ -87,7 +89,7 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {

 t.gceController = gceController
 t.jig = jig
-t.httpClient = framework.BuildInsecureClient(framework.IngressReqTimeout)
+t.httpClient = ingress.BuildInsecureClient(ingress.IngressReqTimeout)

 // Allocate a static-ip for the Ingress, this IP is cleaned up via CleanupGCEIngressController
 t.ipName = fmt.Sprintf("%s-static-ip", ns.Name)
@@ -95,9 +97,9 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {

 // Create a working basic Ingress
 By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))
-jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{
-framework.IngressStaticIPKey: t.ipName,
-framework.IngressAllowHTTPKey: "false",
+jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{
+ingress.IngressStaticIPKey: t.ipName,
+ingress.IngressAllowHTTPKey: "false",
 }, map[string]string{})
 t.jig.SetHTTPS("tls-secret", "ingress.test.com")
|
|||
// It seems that someone is using flag.Parse() after init() and TestMain().
|
||||
// TODO(random-liu): Find who is using flag.Parse() and cause errors and move the following logic
|
||||
// into TestContext.
|
||||
// TODO(pohly): remove RegisterNodeFlags from test_context.go enable Viper config support here?
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
|