mirror of https://github.com/k3s-io/k3s
Remove Ingress-GCE upgrade tests from k/k
parent 28910bf2d7
commit 31d5e3c1a6
@@ -39,15 +39,6 @@ func EtcdUpgrade(target_storage, target_version string) error {
 	}
 }
 
-func IngressUpgrade(isUpgrade bool) error {
-	switch TestContext.Provider {
-	case "gce":
-		return ingressUpgradeGCE(isUpgrade)
-	default:
-		return fmt.Errorf("IngressUpgrade() is not implemented for provider %s", TestContext.Provider)
-	}
-}
-
 func MasterUpgrade(v string) error {
 	switch TestContext.Provider {
 	case "gce":
@@ -72,27 +63,6 @@ func etcdUpgradeGCE(target_storage, target_version string) error {
 	return err
 }
 
-func ingressUpgradeGCE(isUpgrade bool) error {
-	var command string
-	if isUpgrade {
-		// User specified image to upgrade to.
-		targetImage := TestContext.IngressUpgradeImage
-		if targetImage != "" {
-			command = fmt.Sprintf("sudo sed -i -re 's|(image:)(.*)|\\1 %s|' /etc/kubernetes/manifests/glbc.manifest", targetImage)
-		} else {
-			// Upgrade to latest HEAD image.
-			command = "sudo sed -i -re 's/(image:)(.*)/\\1 gcr.io\\/k8s-ingress-image-push\\/ingress-gce-e2e-glbc-amd64:master/' /etc/kubernetes/manifests/glbc.manifest"
-		}
-	} else {
-		// Downgrade to latest release image.
-		command = "sudo sed -i -re 's/(image:)(.*)/\\1 k8s.gcr.io\\/ingress-gce-glbc-amd64:v1.1.1/' /etc/kubernetes/manifests/glbc.manifest"
-	}
-	// Kubelet should restart glbc automatically.
-	sshResult, err := NodeExec(GetMasterHost(), command)
-	LogSSHResult(sshResult)
-	return err
-}
-
 // TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
 func MasterUpgradeGCEWithKubeProxyDaemonSet(v string, enableKubeProxyDaemonSet bool) error {
 	return masterUpgradeGCE(v, enableKubeProxyDaemonSet)
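The removed helper works because glbc runs as a static pod on the GCE master: the kubelet watches /etc/kubernetes/manifests and recreates a pod whenever its manifest file changes, so rewriting the image: line over SSH is enough to roll the controller. A minimal, self-contained sketch of the command construction (the helper name is hypothetical, not part of the original code); note the user-supplied branch uses '|' as the sed delimiter so image names containing '/' need no escaping:

package main

import "fmt"

// buildGlbcSwapCommand sketches how the removed ingressUpgradeGCE assembled
// its sed invocation: rewrite the "image:" line of the glbc static pod
// manifest in place, then rely on the kubelet to restart the pod.
func buildGlbcSwapCommand(targetImage string) string {
	// '|' as the sed delimiter avoids escaping the '/' in the image name.
	return fmt.Sprintf("sudo sed -i -re 's|(image:)(.*)|\\1 %s|' /etc/kubernetes/manifests/glbc.manifest", targetImage)
}

func main() {
	fmt.Println(buildGlbcSwapCommand("k8s.gcr.io/ingress-gce-glbc-amd64:v1.1.1"))
}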
@@ -100,7 +100,6 @@ type TestContextType struct {
 	SystemPodsStartupTimeout time.Duration
 	EtcdUpgradeStorage       string
 	EtcdUpgradeVersion       string
-	IngressUpgradeImage      string
 	GCEUpgradeScript         string
 	ContainerRuntime         string
 	ContainerRuntimeEndpoint string
@@ -316,7 +315,6 @@ func RegisterClusterFlags() {
 	flag.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
 	flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
 	flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
-	flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
 	flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
 	flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
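For orientation (not part of the change): the two hunks above show the standard e2e wiring, where each TestContextType field is bound to a command-line flag, so a run could select the target image with --ingress-upgrade-image. A self-contained sketch of that pattern, with hypothetical names:

package main

import (
	"flag"
	"fmt"
)

// testContextType mirrors the binding style above: flags write straight
// into a shared context struct that tests read after flag.Parse().
type testContextType struct {
	IngressUpgradeImage string
}

var testContext testContextType

func main() {
	flag.StringVar(&testContext.IngressUpgradeImage, "ingress-upgrade-image", "",
		"Image to upgrade to if doing an upgrade test for ingress.")
	flag.Parse()
	fmt.Println("ingress upgrade image:", testContext.IngressUpgradeImage)
}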
@@ -56,7 +56,6 @@ var upgradeTests = []upgrades.Test{
 	&upgrades.HPAUpgradeTest{},
 	&storage.PersistentVolumeUpgradeTest{},
 	&apps.DaemonSetUpgradeTest{},
-	&upgrades.IngressUpgradeTest{},
 	&upgrades.AppArmorUpgradeTest{},
 	&storage.VolumeModeDowngradeTest{},
 }
@@ -74,18 +73,11 @@ var statefulsetUpgradeTests = []upgrades.Test{
 var kubeProxyUpgradeTests = []upgrades.Test{
 	&upgrades.KubeProxyUpgradeTest{},
 	&upgrades.ServiceUpgradeTest{},
-	&upgrades.IngressUpgradeTest{},
 }
 
 var kubeProxyDowngradeTests = []upgrades.Test{
 	&upgrades.KubeProxyDowngradeTest{},
 	&upgrades.ServiceUpgradeTest{},
-	&upgrades.IngressUpgradeTest{},
 }
 
-// Forcefully swap ingress image.
-var ingressUpgradeTests = []upgrades.Test{
-	&upgrades.IngressUpgradeTest{},
-}
-
 var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
@@ -217,56 +209,6 @@ var _ = SIGDescribe("etcd Upgrade [Feature:EtcdUpgrade]", func() {
 	})
 })
 
-var _ = SIGDescribe("ingress Upgrade [Feature:IngressUpgrade]", func() {
-	f := framework.NewDefaultFramework("ingress-upgrade")
-
-	// Create the frameworks here because we can only create them
-	// in a "Describe".
-	testFrameworks := createUpgradeFrameworks(ingressUpgradeTests)
-	Describe("ingress upgrade", func() {
-		It("should maintain a functioning ingress", func() {
-			upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), "")
-			framework.ExpectNoError(err)
-
-			testSuite := &junit.TestSuite{Name: "ingress upgrade"}
-			ingressTest := &junit.TestCase{Name: "[sig-networking] ingress-upgrade", Classname: "upgrade_tests"}
-			testSuite.TestCases = append(testSuite.TestCases, ingressTest)
-
-			upgradeFunc := func() {
-				start := time.Now()
-				defer finalizeUpgradeTest(start, ingressTest)
-				framework.ExpectNoError(framework.IngressUpgrade(true))
-			}
-			runUpgradeSuite(f, ingressUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.IngressUpgrade, upgradeFunc)
-		})
-	})
-})
-
-var _ = SIGDescribe("ingress Downgrade [Feature:IngressDowngrade]", func() {
-	f := framework.NewDefaultFramework("ingress-downgrade")
-
-	// Create the frameworks here because we can only create them
-	// in a "Describe".
-	testFrameworks := createUpgradeFrameworks(ingressUpgradeTests)
-	Describe("ingress downgrade", func() {
-		It("should maintain a functioning ingress", func() {
-			upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), "")
-			framework.ExpectNoError(err)
-
-			testSuite := &junit.TestSuite{Name: "ingress downgrade"}
-			ingressTest := &junit.TestCase{Name: "[sig-networking] ingress-downgrade", Classname: "upgrade_tests"}
-			testSuite.TestCases = append(testSuite.TestCases, ingressTest)
-
-			upgradeFunc := func() {
-				start := time.Now()
-				defer finalizeUpgradeTest(start, ingressTest)
-				framework.ExpectNoError(framework.IngressUpgrade(false))
-			}
-			runUpgradeSuite(f, ingressUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.IngressUpgrade, upgradeFunc)
-		})
-	})
-})
-
 var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 	f := framework.NewDefaultFramework("gpu-upgrade")
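The two deleted suites share one harness shape: register a junit test case, wrap the actual cluster mutation in upgradeFunc, and hand both to runUpgradeSuite, which drives each upgrades.Test around the mutation. runUpgradeSuite itself is not shown in this commit; the following is a hypothetical reduction of the lifecycle it implies, where each test's Test method runs concurrently with the upgrade and observes its completion through the done channel:

package main

import (
	"fmt"
	"sync"
	"time"
)

// test is a stand-in for upgrades.Test, reduced to the lifecycle the deleted
// suites exercise: Setup, Test (concurrent with the upgrade), Teardown.
type test interface {
	Setup()
	Test(done <-chan struct{})
	Teardown()
}

// runUpgradeSuite is a hypothetical reduction of the real harness (not part
// of this diff): Test methods run in the background while upgradeFunc
// performs the upgrade, and done is closed once the upgrade finishes.
func runUpgradeSuite(tests []test, upgradeFunc func()) {
	for _, t := range tests {
		t.Setup()
	}
	done := make(chan struct{})
	var wg sync.WaitGroup
	for _, t := range tests {
		wg.Add(1)
		go func(t test) {
			defer wg.Done()
			t.Test(done) // may poll continuously until done closes
		}(t)
	}
	upgradeFunc() // e.g. the removed framework.IngressUpgrade(true)
	close(done)   // signal that the upgrade completed
	wg.Wait()
	for _, t := range tests {
		t.Teardown()
	}
}

type fakeTest struct{ name string }

func (f fakeTest) Setup()    { fmt.Println(f.name, "setup") }
func (f fakeTest) Teardown() { fmt.Println(f.name, "teardown") }
func (f fakeTest) Test(done <-chan struct{}) {
	<-done // wait for the "upgrade" to finish, then verify
	fmt.Println(f.name, "verified post-upgrade")
}

func main() {
	runUpgradeSuite([]test{fakeTest{"ingress"}}, func() { time.Sleep(10 * time.Millisecond) })
}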
@@ -13,7 +13,6 @@ go_library(
         "configmaps.go",
         "etcd.go",
         "horizontal_pod_autoscalers.go",
-        "ingress.go",
         "kube_proxy_migration.go",
         "mysql.go",
         "nvidia-gpu.go",
@@ -28,7 +27,6 @@ go_library(
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
-        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -39,16 +37,12 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/ingress:go_default_library",
-        "//test/e2e/framework/providers/gce:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",
         "//test/e2e/scheduling:go_default_library",
         "//test/utils/image:go_default_library",
-        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/github.com/onsi/gomega/gstruct:go_default_library",
-        "//vendor/google.golang.org/api/compute/v1:go_default_library",
     ],
 )
 
@@ -1,269 +0,0 @@ (entire file deleted)

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package upgrades

import (
	"encoding/json"
	"fmt"
	"net/http"
	"path/filepath"
	"reflect"

	"github.com/davecgh/go-spew/spew"
	"github.com/onsi/ginkgo"

	compute "google.golang.org/api/compute/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/ingress"
	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
)

// Dependent on "static-ip-2" manifests
const path = "foo"
const host = "ingress.test.com"

// IngressUpgradeTest adapts the Ingress e2e for upgrade testing
type IngressUpgradeTest struct {
	gceController *gce.GCEIngressController
	// holds GCP resources pre-upgrade
	resourceStore *GCPResourceStore
	jig           *ingress.TestJig
	httpClient    *http.Client
	ip            string
	ipName        string
	skipSSLCheck  bool
}

// GCPResourceStore keeps track of the GCP resources spun up by an ingress.
// Note: Fields are exported so that we can utilize reflection.
type GCPResourceStore struct {
	Fw      *compute.Firewall
	FwdList []*compute.ForwardingRule
	UmList  []*compute.UrlMap
	TpList  []*compute.TargetHttpProxy
	TpsList []*compute.TargetHttpsProxy
	SslList []*compute.SslCertificate
	BeList  []*compute.BackendService
	IP      *compute.Address
	IgList  []*compute.InstanceGroup
}

// Name returns the tracking name of the test.
func (IngressUpgradeTest) Name() string { return "ingress-upgrade" }

// Setup creates a GLBC, allocates an ip, and an ingress resource,
// then waits for a successful connectivity check to the ip.
// Also keeps track of all load balancer resources for cross-checking
// during an IngressUpgrade.
func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
	framework.SkipUnlessProviderIs("gce", "gke")

	// jig handles all Kubernetes testing logic
	jig := ingress.NewIngressTestJig(f.ClientSet)

	ns := f.Namespace

	// gceController handles all cloud testing logic
	gceController := &gce.GCEIngressController{
		Ns:     ns.Name,
		Client: jig.Client,
		Cloud:  framework.TestContext.CloudConfig,
	}
	framework.ExpectNoError(gceController.Init())

	t.gceController = gceController
	t.jig = jig
	t.httpClient = ingress.BuildInsecureClient(ingress.IngressReqTimeout)

	// Allocate a static-ip for the Ingress, this IP is cleaned up via CleanupGCEIngressController
	t.ipName = fmt.Sprintf("%s-static-ip", ns.Name)
	t.ip = t.gceController.CreateStaticIP(t.ipName)

	// Create a working basic Ingress
	ginkgo.By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))
	jig.CreateIngress(filepath.Join(ingress.GCEIngressManifestPath, "static-ip-2"), ns.Name, map[string]string{
		ingress.IngressStaticIPKey:  t.ipName,
		ingress.IngressAllowHTTPKey: "false",
	}, map[string]string{})
	t.jig.SetHTTPS("tls-secret", "ingress.test.com")

	ginkgo.By("waiting for Ingress to come up with ip: " + t.ip)
	framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))

	ginkgo.By("keeping track of GCP resources created by Ingress")
	t.resourceStore = &GCPResourceStore{}
	t.populateGCPResourceStore(t.resourceStore)
}

// Test waits for the upgrade to complete, and then verifies
// with a connectivity check to the loadbalancer ip.
func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
	switch upgrade {
	case MasterUpgrade, ClusterUpgrade:
		// Restarting the ingress controller shouldn't disrupt a steady state
		// Ingress. Restarting the ingress controller and deleting ingresses
		// while it's down will leak cloud resources, because the ingress
		// controller doesn't checkpoint to disk.
		t.verify(f, done, true)
	case IngressUpgrade:
		t.verify(f, done, true)
	default:
		// Currently ingress gets disrupted across node upgrade, because endpoints
		// get killed and we don't have any guarantees that 2 nodes don't overlap
		// their upgrades (even on cloud platforms like GCE, because VM level
		// rolling upgrades are not Kubernetes aware).
		t.verify(f, done, false)
	}
}

// Teardown cleans up any remaining resources.
func (t *IngressUpgradeTest) Teardown(f *framework.Framework) {
	if ginkgo.CurrentGinkgoTestDescription().Failed {
		framework.DescribeIng(t.gceController.Ns)
	}

	if t.jig.Ingress != nil {
		ginkgo.By("Deleting ingress")
		t.jig.TryDeleteIngress()
	} else {
		ginkgo.By("No ingress created, no cleanup necessary")
	}

	ginkgo.By("Cleaning up cloud resources")
	framework.ExpectNoError(t.gceController.CleanupGCEIngressController())
}

// Skip checks if the test or part of the test should be skipped.
func (t *IngressUpgradeTest) Skip(upgCtx UpgradeContext) bool {
	sslNameChangeVersion, err := version.ParseGeneric("v1.10.0")
	framework.ExpectNoError(err)
	var hasVersionBelow, hasVersionAboveOrEqual bool
	for _, v := range upgCtx.Versions {
		if v.Version.LessThan(sslNameChangeVersion) {
			hasVersionBelow = true
			continue
		}
		hasVersionAboveOrEqual = true
	}
	// Skip SSL certificates check if k8s version changes between 1.10-
	// and 1.10+ because the naming scheme has changed.
	if hasVersionBelow && hasVersionAboveOrEqual {
		t.skipSSLCheck = true
	}
	return false
}

func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
	if testDuringDisruption {
		ginkgo.By("continuously hitting the Ingress IP")
		wait.Until(func() {
			framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
		}, t.jig.PollInterval, done)
	} else {
		ginkgo.By("waiting for upgrade to finish without checking if Ingress remains up")
		<-done
	}
	ginkgo.By("hitting the Ingress IP " + t.ip)
	framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))

	// We want to manually trigger a sync because then we can easily verify
	// a correct sync completed after update.
	ginkgo.By("updating ingress spec to manually trigger a sync")
	t.jig.Update(func(ing *extensions.Ingress) {
		ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append(
			ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths,
			extensions.HTTPIngressPath{
				Path: "/test",
				// Note: Dependant on using "static-ip-2" manifest.
				Backend: ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Backend,
			})
	})
	// WaitForIngress() tests that all paths are pinged, which is how we know
	// everything is synced with the cloud.
	t.jig.WaitForIngress(false)
	ginkgo.By("comparing GCP resources post-upgrade")
	postUpgradeResourceStore := &GCPResourceStore{}
	t.populateGCPResourceStore(postUpgradeResourceStore)

	// Stub out the number of instances as that is out of Ingress controller's control.
	for _, ig := range t.resourceStore.IgList {
		ig.Size = 0
	}
	for _, ig := range postUpgradeResourceStore.IgList {
		ig.Size = 0
	}
	// Stub out compute.SslCertificates in case we know it will change during an upgrade/downgrade.
	if t.skipSSLCheck {
		t.resourceStore.SslList = nil
		postUpgradeResourceStore.SslList = nil
	}

	// TODO(rramkumar): Remove this when GLBC v1.2.0 is released.
	t.resourceStore.BeList = nil
	postUpgradeResourceStore.BeList = nil

	framework.ExpectNoError(compareGCPResourceStores(t.resourceStore, postUpgradeResourceStore, func(v1 reflect.Value, v2 reflect.Value) error {
		i1 := v1.Interface()
		i2 := v2.Interface()
		// Skip verifying the UrlMap since we did that via WaitForIngress()
		if !reflect.DeepEqual(i1, i2) && (v1.Type() != reflect.TypeOf([]*compute.UrlMap{})) {
			return spew.Errorf("resources after ingress upgrade were different:\n Pre-Upgrade: %#v\n Post-Upgrade: %#v", i1, i2)
		}
		return nil
	}))
}

func (t *IngressUpgradeTest) populateGCPResourceStore(resourceStore *GCPResourceStore) {
	cont := t.gceController
	resourceStore.Fw = cont.GetFirewallRule()
	resourceStore.FwdList = cont.ListGlobalForwardingRules()
	resourceStore.UmList = cont.ListUrlMaps()
	resourceStore.TpList = cont.ListTargetHttpProxies()
	resourceStore.TpsList = cont.ListTargetHttpsProxies()
	resourceStore.SslList = cont.ListSslCertificates()
	resourceStore.BeList = cont.ListGlobalBackendServices()
	resourceStore.IP = cont.GetGlobalAddress(t.ipName)
	resourceStore.IgList = cont.ListInstanceGroups()
}

func compareGCPResourceStores(rs1 *GCPResourceStore, rs2 *GCPResourceStore, compare func(v1 reflect.Value, v2 reflect.Value) error) error {
	// Before we do a comparison, remove the ServerResponse field from the
	// Compute API structs. This is needed because two objects could be the same
	// but their ServerResponse will be different if they were populated through
	// separate API calls.
	rs1Json, _ := json.Marshal(rs1)
	rs2Json, _ := json.Marshal(rs2)
	rs1New := &GCPResourceStore{}
	rs2New := &GCPResourceStore{}
	json.Unmarshal(rs1Json, rs1New)
	json.Unmarshal(rs2Json, rs2New)

	// Iterate through struct fields and perform equality checks on the fields.
	// We do this rather than performing a deep equal on the struct itself because
	// it is easier to log which field, if any, is not the same.
	rs1V := reflect.ValueOf(*rs1New)
	rs2V := reflect.ValueOf(*rs2New)
	for i := 0; i < rs1V.NumField(); i++ {
		if err := compare(rs1V.Field(i), rs2V.Field(i)); err != nil {
			return err
		}
	}
	return nil
}
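A note on the JSON round-trip in the deleted compareGCPResourceStores: it works because struct fields tagged `json:"-"` (such as the googleapi.ServerResponse embedded in the compute API structs) are skipped by both Marshal and Unmarshal, so the round-trip zeroes them out and two stores populated by separate API calls compare equal. A self-contained illustration of that mechanism (all names here are hypothetical stand-ins):

package main

import (
	"encoding/json"
	"fmt"
)

type serverResponse struct {
	HTTPStatusCode int
}

type resource struct {
	Name           string
	ServerResponse serverResponse `json:"-"` // never serialized, so dropped by the round-trip
}

func main() {
	a := resource{Name: "fw-1", ServerResponse: serverResponse{HTTPStatusCode: 200}}
	b, _ := json.Marshal(a) // errors ignored for brevity, as in the original
	var clean resource
	json.Unmarshal(b, &clean)
	// Prints {Name:fw-1 ServerResponse:{HTTPStatusCode:0}}: the tagged field is zeroed.
	fmt.Printf("%+v\n", clean)
}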
@@ -40,9 +40,6 @@ const (
 	// EtcdUpgrade indicates that only etcd is being upgraded (or migrated
 	// between storage versions).
 	EtcdUpgrade
-
-	// IngressUpgrade indicates that only ingress is being upgraded.
-	IngressUpgrade
 )
 
 // Test is an interface for upgrade tests.
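The deleted ingress.go implemented this interface. The authoritative definition in upgrades.go is not part of this diff, but reconstructed from the method set visible above it looks roughly like the following (in package upgrades); the deleted test additionally provided Skip(upgCtx UpgradeContext) bool, which the harness consults before running:

// Reconstructed sketch, not copied from upgrades.go.
type Test interface {
	// Name returns the tracking name of the test.
	Name() string
	// Setup prepares cluster state before the upgrade starts.
	Setup(f *framework.Framework)
	// Test runs during the upgrade; done closes when the upgrade completes.
	Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType)
	// Teardown cleans up after the upgrade finishes.
	Teardown(f *framework.Framework)
}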