mirror of https://github.com/k3s-io/k3s
fix golint failures for test/e2e/upgrades
parent 7c8498ab03
commit 6322025d5c
@@ -687,7 +687,6 @@ test/e2e/storage/testsuites
 test/e2e/storage/utils
 test/e2e/storage/vsphere
 test/e2e/ui
-test/e2e/upgrades
 test/e2e/windows
 test/e2e_kubeadm
 test/e2e_node

@@ -66,7 +66,7 @@ var gpuUpgradeTests = []upgrades.Test{
 }
 
 var statefulsetUpgradeTests = []upgrades.Test{
-    &upgrades.MySqlUpgradeTest{},
+    &upgrades.MySQLUpgradeTest{},
     &upgrades.EtcdUpgradeTest{},
     &upgrades.CassandraUpgradeTest{},
 }

@@ -22,8 +22,8 @@ import (
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
 
-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
     "github.com/onsi/gomega/gstruct"
 )
 
@@ -32,8 +32,10 @@ type AppArmorUpgradeTest struct {
     pod *api.Pod
 }
 
+// Name returns the tracking name of the test.
 func (AppArmorUpgradeTest) Name() string { return "apparmor-upgrade" }
 
+// Skip returns true when this test can be skipped.
 func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
     supportedImages := make(map[string]bool)
     for _, d := range common.AppArmorDistros {
@@ -50,11 +52,11 @@ func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 
 // Setup creates a secret and then verifies that a pod can consume it.
 func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
-    By("Loading AppArmor profiles to nodes")
+    ginkgo.By("Loading AppArmor profiles to nodes")
     common.LoadAppArmorProfiles(f)
 
     // Create the initial test pod.
-    By("Creating a long-running AppArmor enabled pod.")
+    ginkgo.By("Creating a long-running AppArmor enabled pod.")
     t.pod = common.CreateAppArmorTestPod(f, false, false)
 
     // Verify initial state.
@@ -76,32 +78,32 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
 // Teardown cleans up any remaining resources.
 func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
     // rely on the namespace deletion to clean up everything
-    By("Logging container failures")
+    ginkgo.By("Logging container failures")
     framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 }
 
 func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
-    By("Verifying an AppArmor profile is continuously enforced for a pod")
+    ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
     pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
     framework.ExpectNoError(err, "Should be able to get pod")
-    Expect(pod.Status.Phase).To(Equal(api.PodRunning), "Pod should stay running")
-    Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(BeNil(), "Container should be running")
-    Expect(pod.Status.ContainerStatuses[0].RestartCount).To(BeZero(), "Container should not need to be restarted")
+    gomega.Expect(pod.Status.Phase).To(gomega.Equal(api.PodRunning), "Pod should stay running")
+    gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
+    gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
 }
 
 func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) {
-    By("Verifying an AppArmor profile is enforced for a new pod")
+    ginkgo.By("Verifying an AppArmor profile is enforced for a new pod")
     common.CreateAppArmorTestPod(f, false, true)
 }
 
 func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) {
-    By("Verifying nodes are AppArmor enabled")
+    ginkgo.By("Verifying nodes are AppArmor enabled")
     nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
     framework.ExpectNoError(err, "Failed to list nodes")
     for _, node := range nodes.Items {
-        Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
+        gomega.Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
             "Ready": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
-                "Message": ContainSubstring("AppArmor enabled"),
+                "Message": gomega.ContainSubstring("AppArmor enabled"),
             }),
         }))
     }

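The recurring change in the hunks above, and in every file that follows, is the removal of the Ginkgo/Gomega dot imports, which golint flags with "should not use dot imports". Below is a minimal, hypothetical sketch of the resulting call-site pattern; it is not part of the commit, and the package and function names are illustrative only.

package upgrades_sketch

import (
    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
)

// exampleStep shows the call sites after the change: with the dot imports gone,
// Ginkgo's By and Gomega's Expect/BeTrue must be qualified by their packages.
func exampleStep(ready bool) {
    ginkgo.By("Verifying an example condition")
    gomega.Expect(ready).To(gomega.BeTrue())
}

The same mechanical rewrite explains the By -> ginkgo.By, Expect -> gomega.Expect, Fail -> ginkgo.Fail, and matcher (Equal, BeTrue, HaveOccurred, ...) -> gomega.* substitutions throughout the diff.
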
@@ -25,8 +25,8 @@ import (
     "sync"
     "time"
 
-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/version"
@@ -60,7 +60,7 @@ func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 }
 
 func cassandraKubectlCreate(ns, file string) {
-    input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file), Fail))
+    input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file), ginkgo.Fail))
     framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
 }
 
@@ -75,16 +75,16 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
     statefulsetTimeout := 10 * time.Minute
     t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
 
-    By("Creating a PDB")
+    ginkgo.By("Creating a PDB")
     cassandraKubectlCreate(ns, "pdb.yaml")
 
-    By("Creating a Cassandra StatefulSet")
+    ginkgo.By("Creating a Cassandra StatefulSet")
     t.ssTester.CreateStatefulSet(cassandraManifestPath, ns)
 
-    By("Creating a cassandra-test-server deployment")
+    ginkgo.By("Creating a cassandra-test-server deployment")
     cassandraKubectlCreate(ns, "tester.yaml")
 
-    By("Getting the ingress IPs from the services")
+    ginkgo.By("Getting the ingress IPs from the services")
     err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
         if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
             return false, nil
@@ -95,18 +95,18 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
         }
         return true, nil
     })
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     framework.Logf("Service endpoint is up")
 
-    By("Adding 2 dummy users")
-    Expect(t.addUser("Alice")).NotTo(HaveOccurred())
-    Expect(t.addUser("Bob")).NotTo(HaveOccurred())
+    ginkgo.By("Adding 2 dummy users")
+    gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
+    gomega.Expect(t.addUser("Bob")).NotTo(gomega.HaveOccurred())
     t.successfulWrites = 2
 
-    By("Verifying that the users exist")
+    ginkgo.By("Verifying that the users exist")
     users, err := t.listUsers()
-    Expect(err).NotTo(HaveOccurred())
-    Expect(len(users)).To(Equal(2))
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(len(users)).To(gomega.Equal(2))
 }
 
 // listUsers gets a list of users from the db via the tester service.
@@ -151,7 +151,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error {
 // getServiceIP is a helper method to extract the Ingress IP from the service.
 func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
     svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     ingress := svc.Status.LoadBalancer.Ingress
     if len(ingress) == 0 {
         return ""
@@ -165,7 +165,7 @@ func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName
 // ratio is over a certain threshold (0.75). We also verify that we get
 // at least the same number of rows back as we successfully wrote.
 func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
-    By("Continuously polling the database during upgrade.")
+    ginkgo.By("Continuously polling the database during upgrade.")
     var (
         success, failures, writeAttempts, lastUserCount int
         mu      sync.Mutex
@@ -199,19 +199,19 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
     }, 10*time.Millisecond, done)
     framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
 
-    Expect(lastUserCount >= t.successfulWrites).To(BeTrue())
+    gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
     ratio := float64(success) / float64(success+failures)
     framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
     ratio = float64(t.successfulWrites) / float64(writeAttempts)
     framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
     framework.Logf("Errors: %v", errors)
     // TODO(maisem): tweak this value once we have a few test runs.
-    Expect(ratio > 0.75).To(BeTrue())
+    gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
 }
 
 // Teardown does one final check of the data's availability.
 func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) {
     users, err := t.listUsers()
-    Expect(err).NotTo(HaveOccurred())
-    Expect(len(users) >= t.successfulWrites).To(BeTrue())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
 }

@@ -24,7 +24,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     imageutils "k8s.io/kubernetes/test/utils/image"
 
-    . "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo"
     "k8s.io/apimachinery/pkg/util/uuid"
 )
 
@@ -34,6 +34,7 @@ type ConfigMapUpgradeTest struct {
     configMap *v1.ConfigMap
 }
 
+// Name returns the tracking name of the test.
 func (ConfigMapUpgradeTest) Name() string {
     return "[sig-storage] [sig-api-machinery] configmap-upgrade"
 }
@@ -54,13 +55,13 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
         },
     }
 
-    By("Creating a ConfigMap")
+    ginkgo.By("Creating a ConfigMap")
     var err error
     if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil {
         framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
     }
 
-    By("Making sure the ConfigMap is consumable")
+    ginkgo.By("Making sure the ConfigMap is consumable")
     t.testPod(f)
 }
 
@@ -68,7 +69,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
 // pod can still consume the ConfigMap.
 func (t *ConfigMapUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
     <-done
-    By("Consuming the ConfigMap after upgrade")
+    ginkgo.By("Consuming the ConfigMap after upgrade")
     t.testPod(f)
 }
 

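Several hunks, like the ConfigMap one above, only add comments on exported identifiers: golint reports "exported method X should have comment or be unexported" and expects the comment to start with the identifier's name. A minimal sketch of that rule, with invented names, not taken from the commit:

package upgrades_sketch

// ExampleUpgradeTest is a placeholder type used only for this sketch.
type ExampleUpgradeTest struct{}

// Name returns the tracking name of the test.
// The comment must begin with "Name" for golint to accept it.
func (ExampleUpgradeTest) Name() string { return "example-upgrade" }
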
@@ -25,8 +25,8 @@ import (
     "sync"
     "time"
 
-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/version"
@@ -37,14 +37,17 @@ import (
 
 const manifestPath = "test/e2e/testing-manifests/statefulset/etcd"
 
+// EtcdUpgradeTest tests that etcd is writable before and after a cluster upgrade.
 type EtcdUpgradeTest struct {
     ip               string
     successfulWrites int
     ssTester         *framework.StatefulSetTester
 }
 
+// Name returns the tracking name of the test.
 func (EtcdUpgradeTest) Name() string { return "etcd-upgrade" }
 
+// Skip returns true when this test can be skipped.
 func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
     minVersion := version.MustParseSemantic("1.6.0")
     for _, vCtx := range upgCtx.Versions {
@@ -56,26 +59,27 @@ func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 }
 
 func kubectlCreate(ns, file string) {
-    input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), Fail))
+    input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file), ginkgo.Fail))
     framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
 }
 
+// Setup creates etcd statefulset and then verifies that the etcd is writable.
 func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
     ns := f.Namespace.Name
     statefulsetPoll := 30 * time.Second
     statefulsetTimeout := 10 * time.Minute
     t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
 
-    By("Creating a PDB")
+    ginkgo.By("Creating a PDB")
     kubectlCreate(ns, "pdb.yaml")
 
-    By("Creating an etcd StatefulSet")
+    ginkgo.By("Creating an etcd StatefulSet")
     t.ssTester.CreateStatefulSet(manifestPath, ns)
 
-    By("Creating an etcd--test-server deployment")
+    ginkgo.By("Creating an etcd--test-server deployment")
     kubectlCreate(ns, "tester.yaml")
 
-    By("Getting the ingress IPs from the services")
+    ginkgo.By("Getting the ingress IPs from the services")
     err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
         if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
             return false, nil
@@ -86,18 +90,18 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
         }
         return true, nil
     })
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     framework.Logf("Service endpoint is up")
 
-    By("Adding 2 dummy users")
-    Expect(t.addUser("Alice")).NotTo(HaveOccurred())
-    Expect(t.addUser("Bob")).NotTo(HaveOccurred())
+    ginkgo.By("Adding 2 dummy users")
+    gomega.Expect(t.addUser("Alice")).NotTo(gomega.HaveOccurred())
+    gomega.Expect(t.addUser("Bob")).NotTo(gomega.HaveOccurred())
     t.successfulWrites = 2
 
-    By("Verifying that the users exist")
+    ginkgo.By("Verifying that the users exist")
     users, err := t.listUsers()
-    Expect(err).NotTo(HaveOccurred())
-    Expect(len(users)).To(Equal(2))
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(len(users)).To(gomega.Equal(2))
 }
 
 func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
@@ -139,7 +143,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error {
 
 func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
     svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     ingress := svc.Status.LoadBalancer.Ingress
     if len(ingress) == 0 {
         return ""
@@ -147,8 +151,9 @@ func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName strin
     return ingress[0].IP
 }
 
+// Test waits for upgrade to complete and verifies if etcd is writable.
 func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
-    By("Continuously polling the database during upgrade.")
+    ginkgo.By("Continuously polling the database during upgrade.")
     var (
         success, failures, writeAttempts, lastUserCount int
         mu      sync.Mutex
@@ -182,19 +187,19 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
     }, 10*time.Millisecond, done)
     framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
 
-    Expect(lastUserCount >= t.successfulWrites).To(BeTrue())
+    gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
     ratio := float64(success) / float64(success+failures)
     framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
     ratio = float64(t.successfulWrites) / float64(writeAttempts)
     framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
     framework.Logf("Errors: %v", errors)
     // TODO(maisem): tweak this value once we have a few test runs.
-    Expect(ratio > 0.75).To(BeTrue())
+    gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
 }
 
 // Teardown does one final check of the data's availability.
 func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) {
     users, err := t.listUsers()
-    Expect(err).NotTo(HaveOccurred())
-    Expect(len(users) >= t.successfulWrites).To(BeTrue())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
 }

@@ -24,7 +24,7 @@ import (
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
 
-    . "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo"
 )
 
 // HPAUpgradeTest tests that HPA rescales target resource correctly before and after a cluster upgrade.
@@ -33,9 +33,10 @@ type HPAUpgradeTest struct {
     hpa *autoscalingv1.HorizontalPodAutoscaler
 }
 
+// Name returns the tracking name of the test.
 func (HPAUpgradeTest) Name() string { return "hpa-upgrade" }
 
-// Creates a resource consumer and an HPA object that autoscales the consumer.
+// Setup creates a resource consumer and an HPA object that autoscales the consumer.
 func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
     t.rc = common.NewDynamicResourceConsumer(
         "res-cons-upgrade",
@@ -63,7 +64,7 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
 // Test waits for upgrade to complete and verifies if HPA works correctly.
 func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
     // Block until upgrade is done
-    By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA"))
+    ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA"))
     <-done
     t.test()
 }
@@ -79,19 +80,19 @@ func (t *HPAUpgradeTest) test() {
     const timeToWait = 15 * time.Minute
     t.rc.Resume()
 
-    By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
+    ginkgo.By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
     t.rc.ConsumeCPU(10) /* millicores */
-    By(fmt.Sprintf("HPA waits for 1 replica"))
+    ginkgo.By(fmt.Sprintf("HPA waits for 1 replica"))
     t.rc.WaitForReplicas(1, timeToWait)
 
-    By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
+    ginkgo.By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
     t.rc.ConsumeCPU(250) /* millicores */
-    By(fmt.Sprintf("HPA waits for 3 replicas"))
+    ginkgo.By(fmt.Sprintf("HPA waits for 3 replicas"))
     t.rc.WaitForReplicas(3, timeToWait)
 
-    By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
+    ginkgo.By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
     t.rc.ConsumeCPU(700) /* millicores */
-    By(fmt.Sprintf("HPA waits for 5 replicas"))
+    ginkgo.By(fmt.Sprintf("HPA waits for 5 replicas"))
     t.rc.WaitForReplicas(5, timeToWait)
 
     // We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail.

@@ -24,7 +24,7 @@ import (
     "reflect"
 
     "github.com/davecgh/go-spew/spew"
-    . "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo"
 
     compute "google.golang.org/api/compute/v1"
     extensions "k8s.io/api/extensions/v1beta1"
@@ -61,10 +61,11 @@ type GCPResourceStore struct {
     TpsList []*compute.TargetHttpsProxy
     SslList []*compute.SslCertificate
     BeList  []*compute.BackendService
-    Ip      *compute.Address
+    IP      *compute.Address
     IgList  []*compute.InstanceGroup
 }
 
+// Name returns the tracking name of the test.
 func (IngressUpgradeTest) Name() string { return "ingress-upgrade" }
 
 // Setup creates a GLBC, allocates an ip, and an ingress resource,
@@ -96,17 +97,17 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
     t.ip = t.gceController.CreateStaticIP(t.ipName)
 
     // Create a working basic Ingress
-    By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))
+    ginkgo.By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))
     jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip-2"), ns.Name, map[string]string{
         ingress.IngressStaticIPKey:  t.ipName,
         ingress.IngressAllowHTTPKey: "false",
     }, map[string]string{})
     t.jig.SetHTTPS("tls-secret", "ingress.test.com")
 
-    By("waiting for Ingress to come up with ip: " + t.ip)
+    ginkgo.By("waiting for Ingress to come up with ip: " + t.ip)
     framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
 
-    By("keeping track of GCP resources created by Ingress")
+    ginkgo.By("keeping track of GCP resources created by Ingress")
     t.resourceStore = &GCPResourceStore{}
     t.populateGCPResourceStore(t.resourceStore)
 }
@@ -134,18 +135,18 @@ func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
 
 // Teardown cleans up any remaining resources.
 func (t *IngressUpgradeTest) Teardown(f *framework.Framework) {
-    if CurrentGinkgoTestDescription().Failed {
+    if ginkgo.CurrentGinkgoTestDescription().Failed {
         framework.DescribeIng(t.gceController.Ns)
     }
 
     if t.jig.Ingress != nil {
-        By("Deleting ingress")
+        ginkgo.By("Deleting ingress")
         t.jig.TryDeleteIngress()
     } else {
-        By("No ingress created, no cleanup necessary")
+        ginkgo.By("No ingress created, no cleanup necessary")
     }
 
-    By("Cleaning up cloud resources")
+    ginkgo.By("Cleaning up cloud resources")
     framework.ExpectNoError(t.gceController.CleanupGCEIngressController())
 }
 
@@ -171,20 +172,20 @@ func (t *IngressUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 
 func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
     if testDuringDisruption {
-        By("continuously hitting the Ingress IP")
+        ginkgo.By("continuously hitting the Ingress IP")
         wait.Until(func() {
             framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
         }, t.jig.PollInterval, done)
     } else {
-        By("waiting for upgrade to finish without checking if Ingress remains up")
+        ginkgo.By("waiting for upgrade to finish without checking if Ingress remains up")
         <-done
     }
-    By("hitting the Ingress IP " + t.ip)
+    ginkgo.By("hitting the Ingress IP " + t.ip)
     framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
 
     // We want to manually trigger a sync because then we can easily verify
     // a correct sync completed after update.
-    By("updating ingress spec to manually trigger a sync")
+    ginkgo.By("updating ingress spec to manually trigger a sync")
     t.jig.Update(func(ing *extensions.Ingress) {
         ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append(
             ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths,
@@ -197,7 +198,7 @@ func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}
     // WaitForIngress() tests that all paths are pinged, which is how we know
     // everything is synced with the cloud.
     t.jig.WaitForIngress(false)
-    By("comparing GCP resources post-upgrade")
+    ginkgo.By("comparing GCP resources post-upgrade")
     postUpgradeResourceStore := &GCPResourceStore{}
     t.populateGCPResourceStore(postUpgradeResourceStore)
 
@@ -238,7 +239,7 @@ func (t *IngressUpgradeTest) populateGCPResourceStore(resourceStore *GCPResource
     resourceStore.TpsList = cont.ListTargetHttpsProxies()
     resourceStore.SslList = cont.ListSslCertificates()
     resourceStore.BeList = cont.ListGlobalBackendServices()
-    resourceStore.Ip = cont.GetGlobalAddress(t.ipName)
+    resourceStore.IP = cont.GetGlobalAddress(t.ipName)
     resourceStore.IgList = cont.ListInstanceGroups()
 }
 

@@ -28,8 +28,8 @@ import (
     clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
 
-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
 )
 
 const (
@@ -43,12 +43,13 @@ const (
 type KubeProxyUpgradeTest struct {
 }
 
+// Name returns the tracking name of the test.
 func (KubeProxyUpgradeTest) Name() string { return "[sig-network] kube-proxy-upgrade" }
 
 // Setup verifies kube-proxy static pods is running before uprgade.
 func (t *KubeProxyUpgradeTest) Setup(f *framework.Framework) {
-    By("Waiting for kube-proxy static pods running and ready")
-    Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(HaveOccurred())
+    ginkgo.By("Waiting for kube-proxy static pods running and ready")
+    gomega.Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(gomega.HaveOccurred())
 }
 
 // Test validates if kube-proxy is migrated from static pods to DaemonSet.
@@ -56,14 +57,14 @@ func (t *KubeProxyUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
     c := f.ClientSet
 
     // Block until upgrade is done.
-    By("Waiting for upgrade to finish")
+    ginkgo.By("Waiting for upgrade to finish")
     <-done
 
-    By("Waiting for kube-proxy static pods disappear")
-    Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(HaveOccurred())
+    ginkgo.By("Waiting for kube-proxy static pods disappear")
+    gomega.Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(gomega.HaveOccurred())
 
-    By("Waiting for kube-proxy DaemonSet running and ready")
-    Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(HaveOccurred())
+    ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
+    gomega.Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(gomega.HaveOccurred())
 }
 
 // Teardown does nothing.
@@ -74,12 +75,13 @@ func (t *KubeProxyUpgradeTest) Teardown(f *framework.Framework) {
 type KubeProxyDowngradeTest struct {
 }
 
+// Name returns the tracking name of the test.
 func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-downgrade" }
 
 // Setup verifies kube-proxy DaemonSet is running before uprgade.
 func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) {
-    By("Waiting for kube-proxy DaemonSet running and ready")
-    Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(HaveOccurred())
+    ginkgo.By("Waiting for kube-proxy DaemonSet running and ready")
+    gomega.Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(gomega.HaveOccurred())
 }
 
 // Test validates if kube-proxy is migrated from DaemonSet to static pods.
@@ -87,14 +89,14 @@ func (t *KubeProxyDowngradeTest) Test(f *framework.Framework, done <-chan struct
     c := f.ClientSet
 
     // Block until upgrade is done.
-    By("Waiting for upgrade to finish")
+    ginkgo.By("Waiting for upgrade to finish")
     <-done
 
-    By("Waiting for kube-proxy DaemonSet disappear")
-    Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(HaveOccurred())
+    ginkgo.By("Waiting for kube-proxy DaemonSet disappear")
+    gomega.Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(gomega.HaveOccurred())
 
-    By("Waiting for kube-proxy static pods running and ready")
-    Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(HaveOccurred())
+    ginkgo.By("Waiting for kube-proxy static pods running and ready")
+    gomega.Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(gomega.HaveOccurred())
 }
 
 // Teardown does nothing.

@@ -25,8 +25,8 @@ import (
     "strconv"
     "time"
 
-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/version"
@@ -37,17 +37,19 @@ import (
 
 const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade"
 
-// MySqlUpgradeTest implements an upgrade test harness that polls a replicated sql database.
-type MySqlUpgradeTest struct {
+// MySQLUpgradeTest implements an upgrade test harness that polls a replicated sql database.
+type MySQLUpgradeTest struct {
     ip               string
     successfulWrites int
     nextWrite        int
     ssTester         *framework.StatefulSetTester
 }
 
-func (MySqlUpgradeTest) Name() string { return "mysql-upgrade" }
+// Name returns the tracking name of the test.
+func (MySQLUpgradeTest) Name() string { return "mysql-upgrade" }
 
-func (MySqlUpgradeTest) Skip(upgCtx UpgradeContext) bool {
+// Skip returns true when this test can be skipped.
+func (MySQLUpgradeTest) Skip(upgCtx UpgradeContext) bool {
     minVersion := version.MustParseSemantic("1.5.0")
 
     for _, vCtx := range upgCtx.Versions {
@@ -59,13 +61,13 @@ func (MySqlUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 }
 
 func mysqlKubectlCreate(ns, file string) {
-    input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file), Fail))
+    input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file), ginkgo.Fail))
     framework.RunKubectlOrDieInput(input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
 }
 
-func (t *MySqlUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
+func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
     svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     ingress := svc.Status.LoadBalancer.Ingress
     if len(ingress) == 0 {
         return ""
@@ -77,22 +79,22 @@ func (t *MySqlUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName stri
 // from the db. It then connects to the db with the write Service and populates the db with a table
 // and a few entries. Finally, it connects to the db with the read Service, and confirms the data is
 // available. The db connections are left open to be used later in the test.
-func (t *MySqlUpgradeTest) Setup(f *framework.Framework) {
+func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
     ns := f.Namespace.Name
     statefulsetPoll := 30 * time.Second
     statefulsetTimeout := 10 * time.Minute
     t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
 
-    By("Creating a configmap")
+    ginkgo.By("Creating a configmap")
     mysqlKubectlCreate(ns, "configmap.yaml")
 
-    By("Creating a mysql StatefulSet")
+    ginkgo.By("Creating a mysql StatefulSet")
     t.ssTester.CreateStatefulSet(mysqlManifestPath, ns)
 
-    By("Creating a mysql-test-server deployment")
+    ginkgo.By("Creating a mysql-test-server deployment")
     mysqlKubectlCreate(ns, "tester.yaml")
 
-    By("Getting the ingress IPs from the test-service")
+    ginkgo.By("Getting the ingress IPs from the test-service")
     err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
         if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
             return false, nil
@@ -103,24 +105,24 @@ func (t *MySqlUpgradeTest) Setup(f *framework.Framework) {
         }
         return true, nil
     })
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     framework.Logf("Service endpoint is up")
 
-    By("Adding 2 names to the database")
-    Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred())
-    Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred())
+    ginkgo.By("Adding 2 names to the database")
+    gomega.Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(gomega.HaveOccurred())
+    gomega.Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(gomega.HaveOccurred())
 
-    By("Verifying that the 2 names have been inserted")
+    ginkgo.By("Verifying that the 2 names have been inserted")
     count, err := t.countNames()
-    Expect(err).NotTo(HaveOccurred())
-    Expect(count).To(Equal(2))
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(count).To(gomega.Equal(2))
 }
 
 // Test continually polls the db using the read and write connections, inserting data, and checking
 // that all the data is readable.
-func (t *MySqlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
+func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
     var writeSuccess, readSuccess, writeFailure, readFailure int
-    By("Continuously polling the database during upgrade.")
+    ginkgo.By("Continuously polling the database during upgrade.")
     go wait.Until(func() {
         _, err := t.countNames()
         if err != nil {
@@ -162,14 +164,14 @@ func (t *MySqlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
 }
 
 // Teardown performs one final check of the data's availability.
-func (t *MySqlUpgradeTest) Teardown(f *framework.Framework) {
+func (t *MySQLUpgradeTest) Teardown(f *framework.Framework) {
     count, err := t.countNames()
-    Expect(err).NotTo(HaveOccurred())
-    Expect(count >= t.successfulWrites).To(BeTrue())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    gomega.Expect(count >= t.successfulWrites).To(gomega.BeTrue())
 }
 
 // addName adds a new value to the db.
-func (t *MySqlUpgradeTest) addName(name string) error {
+func (t *MySQLUpgradeTest) addName(name string) error {
     val := map[string][]string{"name": {name}}
     t.nextWrite++
     r, err := http.PostForm(fmt.Sprintf("http://%s:8080/addName", t.ip), val)
@@ -189,7 +191,7 @@ func (t *MySqlUpgradeTest) addName(name string) error {
 
 // countNames checks to make sure the values in testing.users are available, and returns
 // the count of them.
-func (t *MySqlUpgradeTest) countNames() (int, error) {
+func (t *MySQLUpgradeTest) countNames() (int, error) {
     r, err := http.Get(fmt.Sprintf("http://%s:8080/countNames", t.ip))
     if err != nil {
         return 0, err

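The MySqlUpgradeTest -> MySQLUpgradeTest rename above, like the earlier Ip -> IP field rename in GCPResourceStore, follows golint's initialism check: common initialisms such as SQL, IP, or URL keep a consistent case inside Go identifiers. A small illustrative sketch with invented names, not part of the commit:

package upgrades_sketch

// serverInfo shows the naming convention golint enforces: initialisms stay
// fully upper-case (IP, URL), never mixed-case (Ip, Url).
type serverInfo struct {
    IP  string // golint would flag a field named "Ip"
    URL string // golint would flag a field named "Url"
}
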
@@ -26,8 +26,8 @@ import (
     "k8s.io/kubernetes/test/e2e/scheduling"
     imageutils "k8s.io/kubernetes/test/utils/image"
 
-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
 )
 
 // NvidiaGPUUpgradeTest tests that gpu resource is available before and after
@@ -35,12 +35,13 @@ import (
 type NvidiaGPUUpgradeTest struct {
 }
 
+// Name returns the tracking name of the test.
 func (NvidiaGPUUpgradeTest) Name() string { return "nvidia-gpu-upgrade [sig-node] [sig-scheduling]" }
 
 // Setup creates a job requesting gpu.
 func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) {
     scheduling.SetupNVIDIAGPUNode(f, false)
-    By("Creating a job requesting gpu")
+    ginkgo.By("Creating a job requesting gpu")
     t.startJob(f)
 }
 
@@ -48,13 +49,13 @@ func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) {
 // cuda pod started by the gpu job can successfully finish.
 func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
     <-done
-    By("Verifying gpu job success")
+    ginkgo.By("Verifying gpu job success")
     t.verifyJobPodSuccess(f)
     if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
         // MasterUpgrade should be totally hitless.
         job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
-        Expect(err).NotTo(HaveOccurred())
-        Expect(job.Status.Failed).To(BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
+        gomega.Expect(err).NotTo(gomega.HaveOccurred())
+        gomega.Expect(job.Status.Failed).To(gomega.BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
     }
 }
 
@@ -85,12 +86,12 @@ func (t *NvidiaGPUUpgradeTest) startJob(f *framework.Framework) {
     }
     ns := f.Namespace.Name
     _, err := framework.CreateJob(f.ClientSet, ns, testJob)
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     framework.Logf("Created job %v", testJob)
-    By("Waiting for gpu job pod start")
+    ginkgo.By("Waiting for gpu job pod start")
     err = framework.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1)
-    Expect(err).NotTo(HaveOccurred())
-    By("Done with gpu job pod start")
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    ginkgo.By("Done with gpu job pod start")
 }
 
 // verifyJobPodSuccess verifies that the started cuda pod successfully passes.
@@ -98,9 +99,9 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
     // Wait for client pod to complete.
     ns := f.Namespace.Name
     err := framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, "cuda-add", 1)
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
-    Expect(err).NotTo(HaveOccurred())
+    gomega.Expect(err).NotTo(gomega.HaveOccurred())
     createdPod := pods.Items[0].Name
     framework.Logf("Created pod %v", createdPod)
     f.PodClient().WaitForSuccess(createdPod, 5*time.Minute)
@@ -108,5 +109,5 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
     framework.ExpectNoError(err, "Should be able to get pod logs")
     framework.Logf("Got pod logs: %v", logs)
     regex := regexp.MustCompile("PASSED")
-    Expect(regex.MatchString(logs)).To(BeTrue())
+    gomega.Expect(regex.MatchString(logs)).To(gomega.BeTrue())
 }

@@ -25,7 +25,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     imageutils "k8s.io/kubernetes/test/utils/image"
 
-    . "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo"
 )
 
 // SecretUpgradeTest test that a secret is available before and after
@@ -34,6 +34,7 @@ type SecretUpgradeTest struct {
     secret *v1.Secret
 }
 
+// Name returns the tracking name of the test.
 func (SecretUpgradeTest) Name() string { return "[sig-storage] [sig-api-machinery] secret-upgrade" }
 
 // Setup creates a secret and then verifies that a pod can consume it.
@@ -52,13 +53,13 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
         },
     }
 
-    By("Creating a secret")
+    ginkgo.By("Creating a secret")
     var err error
     if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil {
         framework.Failf("unable to create test secret %s: %v", t.secret.Name, err)
     }
 
-    By("Making sure the secret is consumable")
+    ginkgo.By("Making sure the secret is consumable")
     t.testPod(f)
 }
 
@@ -66,7 +67,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
 // pod can still consume the secret.
 func (t *SecretUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
     <-done
-    By("Consuming the secret after upgrade")
+    ginkgo.By("Consuming the secret after upgrade")
     t.testPod(f)
 }
 

@@ -21,7 +21,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
 
-    . "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo"
 )
 
 // ServiceUpgradeTest tests that a service is available before and
@@ -34,6 +34,7 @@ type ServiceUpgradeTest struct {
     svcPort int
 }
 
+// Name returns the tracking name of the test.
 func (ServiceUpgradeTest) Name() string { return "service-upgrade" }
 
 func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") }
@@ -45,7 +46,7 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
 
     ns := f.Namespace
 
-    By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name)
+    ginkgo.By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name)
     tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) {
         s.Spec.Type = v1.ServiceTypeLoadBalancer
     })
@@ -56,16 +57,16 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
     tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
     svcPort := int(tcpService.Spec.Ports[0].Port)
 
-    By("creating pod to be part of service " + serviceName)
+    ginkgo.By("creating pod to be part of service " + serviceName)
     rc := jig.RunOrFail(ns.Name, jig.AddRCAntiAffinity)
 
     if shouldTestPDBs() {
-        By("creating a PodDisruptionBudget to cover the ReplicationController")
+        ginkgo.By("creating a PodDisruptionBudget to cover the ReplicationController")
         jig.CreatePDBOrFail(ns.Name, rc)
     }
 
     // Hit it once before considering ourselves ready
-    By("hitting the pod through the service's LoadBalancer")
+    ginkgo.By("hitting the pod through the service's LoadBalancer")
     jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
 
     t.jig = jig
@@ -95,18 +96,18 @@ func (t *ServiceUpgradeTest) Teardown(f *framework.Framework) {
 func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
     if testDuringDisruption {
         // Continuous validation
-        By("continuously hitting the pod through the service's LoadBalancer")
+        ginkgo.By("continuously hitting the pod through the service's LoadBalancer")
         wait.Until(func() {
             t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
         }, framework.Poll, done)
     } else {
         // Block until upgrade is done
-        By("waiting for upgrade to finish without checking if service remains up")
+        ginkgo.By("waiting for upgrade to finish without checking if service remains up")
         <-done
     }
 
     // Sanity check and hit it once more
-    By("hitting the pod through the service's LoadBalancer")
+    ginkgo.By("hitting the pod through the service's LoadBalancer")
     t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
     t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer)
 }

|
|||
import (
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
|
@ -32,7 +32,7 @@ import (
|
|||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
// SecretUpgradeTest tests that a pod with sysctls runs before and after an upgrade. During
|
||||
// SysctlUpgradeTest tests that a pod with sysctls runs before and after an upgrade. During
|
||||
// a master upgrade, the exact pod is expected to stay running. A pod with unsafe sysctls is
|
||||
// expected to keep failing before and after the upgrade.
|
||||
type SysctlUpgradeTest struct {
|
||||
|
@ -53,19 +53,19 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
|
|||
<-done
|
||||
switch upgrade {
|
||||
case MasterUpgrade, ClusterUpgrade:
|
||||
By("Checking the safe sysctl pod keeps running on master upgrade")
|
||||
ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
|
||||
}
|
||||
|
||||
By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
|
||||
ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
|
||||
pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{})
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
if err == nil {
|
||||
Expect(pod.Status.Phase).NotTo(Equal(v1.PodRunning))
|
||||
gomega.Expect(pod.Status.Phase).NotTo(gomega.Equal(v1.PodRunning))
|
||||
}
|
||||
|
||||
t.verifySafeSysctlWork(f)
|
||||
|
@ -78,15 +78,15 @@ func (t *SysctlUpgradeTest) Teardown(f *framework.Framework) {
|
|||
}
|
||||
|
||||
func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod {
|
||||
By("Creating a pod with safe sysctls")
|
||||
ginkgo.By("Creating a pod with safe sysctls")
|
||||
safeSysctl := "net.ipv4.ip_local_port_range"
|
||||
safeSysctlValue := "1024 1042"
|
||||
validPod := sysctlTestPod("valid-sysctls", map[string]string{safeSysctl: safeSysctlValue})
|
||||
validPod = f.PodClient().Create(t.validPod)
|
||||
|
||||
By("Making sure the valid pod launches")
|
||||
ginkgo.By("Making sure the valid pod launches")
|
||||
ev, err := f.PodClient().WaitForErrorEventOrSuccess(t.validPod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
|
||||
framework.Skipf("No sysctl support in Docker <1.12")
|
||||
}
|
||||
|
@ -96,19 +96,19 @@ func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod
|
|||
}
|
||||
|
||||
func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framework) *v1.Pod {
|
||||
By("Creating a pod with unsafe sysctls")
|
||||
ginkgo.By("Creating a pod with unsafe sysctls")
|
||||
invalidPod := sysctlTestPod("valid-sysctls-"+string(uuid.NewUUID()), map[string]string{
|
||||
"fs.mount-max": "1000000",
|
||||
})
|
||||
invalidPod = f.PodClient().Create(invalidPod)
|
||||
|
||||
By("Making sure the invalid pod failed")
|
||||
ginkgo.By("Making sure the invalid pod failed")
|
||||
ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
|
||||
framework.Skipf("No sysctl support in Docker <1.12")
|
||||
}
|
||||
Expect(ev.Reason).To(Equal(sysctl.ForbiddenReason))
|
||||
gomega.Expect(ev.Reason).To(gomega.Equal(sysctl.ForbiddenReason))
|
||||
|
||||
return invalidPod
|
||||
}
|
||||
|
|