upgrade: ClusterUpgrade and MasterUpgrade are equivalent

Some tests were only checking for MasterUpgrade, but that is also
implied by ClusterUpgrade.
pull/564/head
Clayton Coleman 2019-02-10 23:51:27 -05:00
parent 162b79d2ec
commit 3e0dd7f365
No known key found for this signature in database
GPG Key ID: 3D16906B4F1C5CB3
5 changed files with 8 additions and 8 deletions

View File

@ -66,7 +66,7 @@ func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
// pod can still consume the secret.
func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
if upgrade == MasterUpgrade {
if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
t.verifyPodStillUp(f)
}
t.verifyNodesAppArmorEnabled(f)

View File

@ -115,7 +115,7 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
// with a connectivity check to the loadbalancer ip.
func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
switch upgrade {
case MasterUpgrade:
case MasterUpgrade, ClusterUpgrade:
// Restarting the ingress controller shouldn't disrupt a steady state
// Ingress. Restarting the ingress controller and deleting ingresses
// while it's down will leak cloud resources, because the ingress

View File

@ -20,7 +20,7 @@ import (
"regexp"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/scheduling"
@ -50,7 +50,7 @@ func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
<-done
By("Verifying gpu job success")
t.verifyJobPodSuccess(f)
if upgrade == MasterUpgrade {
if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
// MasterUpgrade and ClusterUpgrade should be totally hitless.
job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
Expect(err).NotTo(HaveOccurred())

View File

@ -17,7 +17,7 @@ limitations under the License.
package upgrades
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@ -77,7 +77,7 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
// Test runs a connectivity check to the service.
func (t *ServiceUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
switch upgrade {
case MasterUpgrade:
case MasterUpgrade, ClusterUpgrade:
t.test(f, done, true)
case NodeUpgrade:
// Node upgrades should test during disruption only on GCE/GKE for now.

View File

@ -22,7 +22,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@ -52,7 +52,7 @@ func (t *SysctlUpgradeTest) Setup(f *framework.Framework) {
func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
switch upgrade {
case MasterUpgrade:
case MasterUpgrade, ClusterUpgrade:
By("Checking the safe sysctl pod keeps running on master upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())