From 3e0dd7f3650f5f40cd52607f1e41bcb755ba6b1b Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Sun, 10 Feb 2019 23:51:27 -0500
Subject: [PATCH 1/2] upgrade: ClusterUpgrade and MasterUpgrade are equivalent

Some tests were only checking for MasterUpgrade, but that is also
implied by ClusterUpgrade.
---
 test/e2e/upgrades/apparmor.go   | 2 +-
 test/e2e/upgrades/ingress.go    | 2 +-
 test/e2e/upgrades/nvidia-gpu.go | 4 ++--
 test/e2e/upgrades/services.go   | 4 ++--
 test/e2e/upgrades/sysctl.go     | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/test/e2e/upgrades/apparmor.go b/test/e2e/upgrades/apparmor.go
index 89614a8598..235710b364 100644
--- a/test/e2e/upgrades/apparmor.go
+++ b/test/e2e/upgrades/apparmor.go
@@ -66,7 +66,7 @@ func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
 // pod can still consume the secret.
 func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
 	<-done
-	if upgrade == MasterUpgrade {
+	if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
 		t.verifyPodStillUp(f)
 	}
 	t.verifyNodesAppArmorEnabled(f)
diff --git a/test/e2e/upgrades/ingress.go b/test/e2e/upgrades/ingress.go
index fbaacc3b7d..d2d3564eb7 100644
--- a/test/e2e/upgrades/ingress.go
+++ b/test/e2e/upgrades/ingress.go
@@ -115,7 +115,7 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
 // with a connectvity check to the loadbalancer ip.
 func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
 	switch upgrade {
-	case MasterUpgrade:
+	case MasterUpgrade, ClusterUpgrade:
 		// Restarting the ingress controller shouldn't disrupt a steady state
 		// Ingress. Restarting the ingress controller and deleting ingresses
 		// while it's down will leak cloud resources, because the ingress
diff --git a/test/e2e/upgrades/nvidia-gpu.go b/test/e2e/upgrades/nvidia-gpu.go
index 12abd0e34a..c2fe6c69e6 100644
--- a/test/e2e/upgrades/nvidia-gpu.go
+++ b/test/e2e/upgrades/nvidia-gpu.go
@@ -20,7 +20,7 @@ import (
 	"regexp"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/scheduling"
@@ -50,7 +50,7 @@ func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 	<-done
 	By("Verifying gpu job success")
 	t.verifyJobPodSuccess(f)
-	if upgrade == MasterUpgrade {
+	if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
 		// MasterUpgrade should be totally hitless.
 		job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
 		Expect(err).NotTo(HaveOccurred())
diff --git a/test/e2e/upgrades/services.go b/test/e2e/upgrades/services.go
index 5cdd2c86eb..fcdf33c559 100644
--- a/test/e2e/upgrades/services.go
+++ b/test/e2e/upgrades/services.go
@@ -17,7 +17,7 @@ limitations under the License.
 package upgrades
 
 import (
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -77,7 +77,7 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
 // Test runs a connectivity check to the service.
 func (t *ServiceUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
 	switch upgrade {
-	case MasterUpgrade:
+	case MasterUpgrade, ClusterUpgrade:
 		t.test(f, done, true)
 	case NodeUpgrade:
 		// Node upgrades should test during disruption only on GCE/GKE for now.
diff --git a/test/e2e/upgrades/sysctl.go b/test/e2e/upgrades/sysctl.go
index 5105887f0e..1720a40e7f 100644
--- a/test/e2e/upgrades/sysctl.go
+++ b/test/e2e/upgrades/sysctl.go
@@ -22,7 +22,7 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -52,7 +52,7 @@ func (t *SysctlUpgradeTest) Setup(f *framework.Framework) {
 func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
 	<-done
 	switch upgrade {
-	case MasterUpgrade:
+	case MasterUpgrade, ClusterUpgrade:
 		By("Checking the safe sysctl pod keeps running on master upgrade")
 		pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
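
For context on this first patch: MasterUpgrade, NodeUpgrade, and ClusterUpgrade
are values of the UpgradeType enum declared in test/e2e/upgrades/upgrade.go,
and a cluster upgrade runs the master upgrade and the node upgrade together,
so any behavior verified for MasterUpgrade should also be verified for
ClusterUpgrade. An abridged sketch of those declarations (paraphrased from
upgrade.go around the time of this series; not part of the patch itself):

    // UpgradeType classifies which part of the cluster an upgrade test exercises.
    type UpgradeType int

    const (
        // MasterUpgrade indicates that only the master (control plane) is upgraded.
        MasterUpgrade UpgradeType = iota

        // NodeUpgrade indicates that only the nodes are upgraded.
        NodeUpgrade

        // ClusterUpgrade indicates that both the master and the nodes are
        // upgraded, so it subsumes the MasterUpgrade case in these tests.
        ClusterUpgrade
    )
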
From a7dfbb99c4218bd8ceb8c589061a79a29e893ad7 Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Sun, 10 Feb 2019 23:53:57 -0500
Subject: [PATCH 2/2] upgrades: DaemonSet test should tolerate all taints

The test expects the DaemonSet to cover every node, but the pod spec
does not tolerate any taints, so tainted nodes would never actually be
covered.
---
 test/e2e/upgrades/apps/daemonsets.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go
index 7ef91fae28..4e1b0b4ae4 100644
--- a/test/e2e/upgrades/apps/daemonsets.go
+++ b/test/e2e/upgrades/apps/daemonsets.go
@@ -20,7 +20,7 @@ import (
 	. "github.com/onsi/ginkgo"
 
 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -59,6 +59,9 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
 			Labels: labelSet,
 		},
 		Spec: v1.PodSpec{
+			Tolerations: []v1.Toleration{
+				{Operator: v1.TolerationOpExists},
+			},
 			Containers: []v1.Container{
 				{
 					Name: daemonSetName,
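
For context on the second patch: a toleration with an empty Key and Operator
set to v1.TolerationOpExists is the wildcard form; it matches every taint
regardless of key, value, or effect, which is what lets the test DaemonSet be
scheduled onto every node, including tainted masters. A minimal standalone
sketch of that behavior using the ToleratesTaint helper from
k8s.io/api/core/v1 (the taints below are hypothetical, chosen only for
illustration):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        // The toleration added by the patch: empty key + Exists operator.
        // An empty key with operator Exists matches all taints.
        tolerateAll := v1.Toleration{Operator: v1.TolerationOpExists}

        // Hypothetical taints, for illustration only.
        taints := []v1.Taint{
            {Key: "node-role.kubernetes.io/master", Effect: v1.TaintEffectNoSchedule},
            {Key: "example.com/maintenance", Value: "true", Effect: v1.TaintEffectNoExecute},
        }
        for i := range taints {
            fmt.Printf("tolerates %q: %v\n", taints[i].Key, tolerateAll.ToleratesTaint(&taints[i]))
        }
    }

Both lines print true. Before the patch the test's pod spec carried no
tolerations at all, so any NoSchedule taint would keep the DaemonSet off that
node and the test's "all nodes covered" check would fail on clusters with
tainted nodes.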