mirror of https://github.com/k3s-io/k3s
fix golint failures of test/e2e/apimachinery
parent 9946d92e16
commit 91bd1ac1ea
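The hunks below all apply the same few golint-driven fixes: dot-imports of Ginkgo and Gomega become regular named imports (so every Describe/It/By/Expect call is package-qualified), an initialism is re-cased, redundant else branches after returns are flattened, and an exported function gains a doc comment. A minimal, illustrative sketch of the dominant import pattern (the package and test names here are made up, not taken from the diff):

    package example

    // golint flags dot-imports ("should not use dot imports"); with named
    // imports every Ginkgo/Gomega identifier is qualified at the call site.
    import (
        "github.com/onsi/ginkgo"
        "github.com/onsi/gomega"
    )

    var _ = ginkgo.Describe("example", func() {
        ginkgo.It("qualifies every Ginkgo/Gomega identifier", func() {
            ginkgo.By("doing something observable")
            gomega.Expect(1 + 1).To(gomega.Equal(2))
        })
    })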
@@ -632,7 +632,6 @@ staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
 staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller
 staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1
 test/e2e
-test/e2e/apimachinery
 test/e2e/apps
 test/e2e/auth
 test/e2e/autoscaling
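(This first hunk edits what appears to be the repository's golint exclusion list, hack/.golint_failures in Kubernetes: test/e2e/apimachinery is dropped now that the package lints clean, while the neighboring entries are unchanged context.)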
@@ -44,7 +44,7 @@ import (
 samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
 "k8s.io/utils/pointer"

-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 )

 var serverAggregatorVersion = utilversion.MustParseSemantic("v1.10.0")
@@ -62,7 +62,7 @@ var _ = SIGDescribe("Aggregator", func() {
 // We want cleanTest to happen before the namespace cleanup AfterEach
 // inserted by NewDefaultFramework, so we put this AfterEach in front
 // of NewDefaultFramework.
-AfterEach(func() {
+ginkgo.AfterEach(func() {
 cleanTest(c, aggrclient, ns)
 })

@@ -71,7 +71,7 @@ var _ = SIGDescribe("Aggregator", func() {
 // We want namespace initialization BeforeEach inserted by
 // NewDefaultFramework to happen before this, so we put this BeforeEach
 // after NewDefaultFramework.
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 c = f.ClientSet
 ns = f.Namespace.Name
 aggrclient = f.AggregatorClient
@@ -102,10 +102,10 @@ func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientse
 _ = client.RbacV1beta1().ClusterRoleBindings().Delete("wardler:"+namespace+":sample-apiserver-reader", nil)
 }

-// A basic test if the sample-apiserver code from 1.10 and compiled against 1.10
+// TestSampleAPIServer is a basic test if the sample-apiserver code from 1.10 and compiled against 1.10
 // will work on the current Aggregator/API-Server.
 func TestSampleAPIServer(f *framework.Framework, image string) {
-By("Registering the sample API server.")
+ginkgo.By("Registering the sample API server.")
 client := f.ClientSet
 restClient := client.Discovery().RESTClient()
 aggrclient := f.AggregatorClient
@@ -23,8 +23,8 @@ import (
 "reflect"
 "time"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"

 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
@@ -40,11 +40,11 @@ const numberOfTotalResources = 400
 var _ = SIGDescribe("Servers with support for API chunking", func() {
 f := framework.NewDefaultFramework("chunking")

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 ns := f.Namespace.Name
 c := f.ClientSet
 client := c.CoreV1().PodTemplates(ns)
-By("creating a large number of resources")
+ginkgo.By("creating a large number of resources")
 workqueue.ParallelizeUntil(context.TODO(), 20, numberOfTotalResources, func(i int) {
 for tries := 3; tries >= 0; tries-- {
 _, err := client.Create(&v1.PodTemplate{
@@ -64,15 +64,15 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 }
 framework.Logf("Got an error creating template %d: %v", i, err)
 }
-Fail("Unable to create template %d, exiting", i)
+ginkgo.Fail("Unable to create template %d, exiting", i)
 })
 })

-It("should return chunks of results for list calls", func() {
+ginkgo.It("should return chunks of results for list calls", func() {
 ns := f.Namespace.Name
 c := f.ClientSet
 client := c.CoreV1().PodTemplates(ns)
-By("retrieving those results in paged fashion several times")
+ginkgo.By("retrieving those results in paged fashion several times")
 for i := 0; i < 3; i++ {
 opts := metav1.ListOptions{}
 found := 0
@@ -80,16 +80,16 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 for {
 opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
 list, err := client.List(opts)
-Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
-Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
+gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))

 if len(lastRV) == 0 {
 lastRV = list.ResourceVersion
 }
-Expect(list.ResourceVersion).To(Equal(lastRV))
+gomega.Expect(list.ResourceVersion).To(gomega.Equal(lastRV))
 for _, item := range list.Items {
-Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
+gomega.Expect(item.Name).To(gomega.Equal(fmt.Sprintf("template-%04d", found)))
 found++
 }
 if len(list.Continue) == 0 {
@@ -97,32 +97,32 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 }
 opts.Continue = list.Continue
 }
-Expect(found).To(BeNumerically("==", numberOfTotalResources))
+gomega.Expect(found).To(gomega.BeNumerically("==", numberOfTotalResources))
 }

-By("retrieving those results all at once")
+ginkgo.By("retrieving those results all at once")
 opts := metav1.ListOptions{Limit: numberOfTotalResources + 1}
 list, err := client.List(opts)
-Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
-Expect(list.Items).To(HaveLen(numberOfTotalResources))
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources))
 })

-It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", func() {
+ginkgo.It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", func() {
 ns := f.Namespace.Name
 c := f.ClientSet
 client := c.CoreV1().PodTemplates(ns)

-By("retrieving the first page")
+ginkgo.By("retrieving the first page")
 oneTenth := int64(numberOfTotalResources / 10)
 opts := metav1.ListOptions{}
 opts.Limit = oneTenth
 list, err := client.List(opts)
-Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 firstToken := list.Continue
 firstRV := list.ResourceVersion
 framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)

-By("retrieving the second page until the token expires")
+ginkgo.By("retrieving the second page until the token expires")
 opts.Continue = firstToken
 var inconsistentToken string
 wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
@@ -147,29 +147,29 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 return true, nil
 })

-By("retrieving the second page again with the token received with the error message")
+ginkgo.By("retrieving the second page again with the token received with the error message")
 opts.Continue = inconsistentToken
 list, err = client.List(opts)
-Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
-Expect(list.ResourceVersion).ToNot(Equal(firstRV))
-Expect(len(list.Items)).To(BeNumerically("==", opts.Limit))
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
+gomega.Expect(list.ResourceVersion).ToNot(gomega.Equal(firstRV))
+gomega.Expect(len(list.Items)).To(gomega.BeNumerically("==", opts.Limit))
 found := oneTenth
 for _, item := range list.Items {
-Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
+gomega.Expect(item.Name).To(gomega.Equal(fmt.Sprintf("template-%04d", found)))
 found++
 }

-By("retrieving all remaining pages")
+ginkgo.By("retrieving all remaining pages")
 opts.Continue = list.Continue
 lastRV := list.ResourceVersion
 for {
 list, err := client.List(opts)
-Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
-Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
-Expect(list.ResourceVersion).To(Equal(lastRV))
+gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))
+gomega.Expect(list.ResourceVersion).To(gomega.Equal(lastRV))
 for _, item := range list.Items {
-Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
+gomega.Expect(item.Name).To(gomega.Equal(fmt.Sprintf("template-%04d", found)))
 found++
 }
 if len(list.Continue) == 0 {
@@ -177,6 +177,6 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 }
 opts.Continue = list.Continue
 }
-Expect(found).To(BeNumerically("==", numberOfTotalResources))
+gomega.Expect(found).To(gomega.BeNumerically("==", numberOfTotalResources))
 })
 })
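The Expect rewrites above rely on one suite-level detail: whether dot-imported or qualified, Gomega reports failures through Ginkgo only after a fail handler is registered once per suite. A minimal, assumed bootstrap, not part of this diff (the Kubernetes e2e harness does the equivalent in its own setup):

    package example

    import (
        "testing"

        "github.com/onsi/ginkgo"
        "github.com/onsi/gomega"
    )

    // TestSuite wires Gomega failures into Ginkgo, then runs all registered specs.
    func TestSuite(t *testing.T) {
        gomega.RegisterFailHandler(ginkgo.Fail)
        ginkgo.RunSpecs(t, "Example Suite")
    }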
@@ -36,8 +36,9 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 "k8s.io/utils/pointer"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
+// ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
 )

@@ -64,7 +65,7 @@ var apiVersions = []v1beta1.CustomResourceDefinitionVersion{
 },
 }

-var alternativeApiVersions = []v1beta1.CustomResourceDefinitionVersion{
+var alternativeAPIVersions = []v1beta1.CustomResourceDefinitionVersion{
 {
 Name: "v1",
 Served: true,
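The rename above is golint's initialisms rule: common initialisms such as API, URL, and ID keep a consistent case inside Go identifiers. Sketched with hypothetical declarations:

    package example

    var alternativeApiVersions []string // golint: should be alternativeAPIVersions
    var alternativeAPIVersions []string // ok: the initialism API keeps its case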
@@ -84,25 +85,25 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Feature:CustomResourceWebh
 var client clientset.Interface
 var namespaceName string

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 client = f.ClientSet
 namespaceName = f.Namespace.Name

 // Make sure the relevant provider supports conversion webhook
 framework.SkipUnlessServerVersionGTE(serverCRDConversionWebhookVersion, f.ClientSet.Discovery())

-By("Setting up server cert")
+ginkgo.By("Setting up server cert")
 context = setupServerCert(f.Namespace.Name, serviceCRDName)
 createAuthReaderRoleBindingForCRDConversion(f, f.Namespace.Name)

 deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.CRDConversionWebhook), context)
 })

-AfterEach(func() {
+ginkgo.AfterEach(func() {
 cleanCRDWebhookTest(client, namespaceName)
 })

-It("Should be able to convert from CR v1 to CR v2", func() {
+ginkgo.It("Should be able to convert from CR v1 to CR v2", func() {
 testcrd, err := crd.CreateMultiVersionTestCRD(f, "stable.example.com", apiVersions,
 &v1beta1.WebhookClientConfig{
 CABundle: context.signingCert,
@@ -119,7 +120,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Feature:CustomResourceWebh
 testCustomResourceConversionWebhook(f, testcrd.Crd, testcrd.DynamicClients)
 })

-It("Should be able to convert a non homogeneous list of CRs", func() {
+ginkgo.It("Should be able to convert a non homogeneous list of CRs", func() {
 testcrd, err := crd.CreateMultiVersionTestCRD(f, "stable.example.com", apiVersions,
 &v1beta1.WebhookClientConfig{
 CABundle: context.signingCert,
@@ -145,7 +146,7 @@ func cleanCRDWebhookTest(client clientset.Interface, namespaceName string) {
 }

 func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespace string) {
-By("Create role binding to let cr conversion webhook read extension-apiserver-authentication")
+ginkgo.By("Create role binding to let cr conversion webhook read extension-apiserver-authentication")
 client := f.ClientSet
 // Create the role binding to allow the webhook read the extension-apiserver-authentication configmap
 _, err := client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{
@@ -174,7 +175,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa
 }

 func deployCustomResourceWebhookAndService(f *framework.Framework, image string, context *certContext) {
-By("Deploying the custom resource conversion webhook pod")
+ginkgo.By("Deploying the custom resource conversion webhook pod")
 client := f.ClientSet

 // Creating the secret that contains the webhook's cert.
@@ -252,13 +253,13 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
 }
 deployment, err := client.AppsV1().Deployments(namespace).Create(d)
 framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)
-By("Wait for the deployment to be ready")
+ginkgo.By("Wait for the deployment to be ready")
 err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
 framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
 err = framework.WaitForDeploymentComplete(client, deployment)
 framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentCRDName, namespace)

-By("Deploying the webhook service")
+ginkgo.By("Deploying the webhook service")

 serviceLabels := map[string]string{"crd-webhook": "true"}
 service := &v1.Service{
@@ -281,37 +282,37 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
 _, err = client.CoreV1().Services(namespace).Create(service)
 framework.ExpectNoError(err, "creating service %s in namespace %s", serviceCRDName, namespace)

-By("Verifying the service has paired with the endpoint")
+ginkgo.By("Verifying the service has paired with the endpoint")
 err = framework.WaitForServiceEndpointsNum(client, namespace, serviceCRDName, 1, 1*time.Second, 30*time.Second)
 framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceCRDName, 1)
 }

 func verifyV1Object(f *framework.Framework, crd *v1beta1.CustomResourceDefinition, obj *unstructured.Unstructured) {
-Expect(obj.GetAPIVersion()).To(BeEquivalentTo(crd.Spec.Group + "/v1"))
+gomega.Expect(obj.GetAPIVersion()).To(gomega.BeEquivalentTo(crd.Spec.Group + "/v1"))
 hostPort, exists := obj.Object["hostPort"]
-Expect(exists).To(BeTrue())
-Expect(hostPort).To(BeEquivalentTo("localhost:8080"))
+gomega.Expect(exists).To(gomega.BeTrue())
+gomega.Expect(hostPort).To(gomega.BeEquivalentTo("localhost:8080"))
 _, hostExists := obj.Object["host"]
-Expect(hostExists).To(BeFalse())
+gomega.Expect(hostExists).To(gomega.BeFalse())
 _, portExists := obj.Object["port"]
-Expect(portExists).To(BeFalse())
+gomega.Expect(portExists).To(gomega.BeFalse())
 }

 func verifyV2Object(f *framework.Framework, crd *v1beta1.CustomResourceDefinition, obj *unstructured.Unstructured) {
-Expect(obj.GetAPIVersion()).To(BeEquivalentTo(crd.Spec.Group + "/v2"))
+gomega.Expect(obj.GetAPIVersion()).To(gomega.BeEquivalentTo(crd.Spec.Group + "/v2"))
 _, hostPortExists := obj.Object["hostPort"]
-Expect(hostPortExists).To(BeFalse())
+gomega.Expect(hostPortExists).To(gomega.BeFalse())
 host, hostExists := obj.Object["host"]
-Expect(hostExists).To(BeTrue())
-Expect(host).To(BeEquivalentTo("localhost"))
+gomega.Expect(hostExists).To(gomega.BeTrue())
+gomega.Expect(host).To(gomega.BeEquivalentTo("localhost"))
 port, portExists := obj.Object["port"]
-Expect(portExists).To(BeTrue())
-Expect(port).To(BeEquivalentTo("8080"))
+gomega.Expect(portExists).To(gomega.BeTrue())
+gomega.Expect(port).To(gomega.BeEquivalentTo("8080"))
 }

 func testCustomResourceConversionWebhook(f *framework.Framework, crd *v1beta1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface) {
 name := "cr-instance-1"
-By("Creating a v1 custom resource")
+ginkgo.By("Creating a v1 custom resource")
 crInstance := &unstructured.Unstructured{
 Object: map[string]interface{}{
 "kind": crd.Spec.Names.Kind,
@@ -324,8 +325,8 @@ func testCustomResourceConversionWebhook(f *framework.Framework, crd *v1beta1.Cu
 },
 }
 _, err := customResourceClients["v1"].Create(crInstance, metav1.CreateOptions{})
-Expect(err).To(BeNil())
-By("v2 custom resource should be converted")
+gomega.Expect(err).To(gomega.BeNil())
+ginkgo.By("v2 custom resource should be converted")
 v2crd, err := customResourceClients["v2"].Get(name, metav1.GetOptions{})
 verifyV2Object(f, crd, v2crd)
 }
@@ -335,7 +336,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
 customResourceClients := testCrd.DynamicClients
 name1 := "cr-instance-1"
 name2 := "cr-instance-2"
-By("Creating a v1 custom resource")
+ginkgo.By("Creating a v1 custom resource")
 crInstance := &unstructured.Unstructured{
 Object: map[string]interface{}{
 "kind": crd.Spec.Names.Kind,
@@ -348,14 +349,14 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
 },
 }
 _, err := customResourceClients["v1"].Create(crInstance, metav1.CreateOptions{})
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())

 // Now cr-instance-1 is stored as v1. lets change storage version
 crd, err = integration.UpdateCustomResourceDefinitionWithRetry(testCrd.APIExtensionClient, crd.Name, func(c *v1beta1.CustomResourceDefinition) {
-c.Spec.Versions = alternativeApiVersions
+c.Spec.Versions = alternativeAPIVersions
 })
-Expect(err).To(BeNil())
-By("Create a v2 custom resource")
+gomega.Expect(err).To(gomega.BeNil())
+ginkgo.By("Create a v2 custom resource")
 crInstance = &unstructured.Unstructured{
 Object: map[string]interface{}{
 "kind": crd.Spec.Names.Kind,
@@ -377,25 +378,25 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
 break
 }
 }
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())

 // Now that we have a v1 and v2 object, both list operation in v1 and v2 should work as expected.

-By("List CRs in v1")
+ginkgo.By("List CRs in v1")
 list, err := customResourceClients["v1"].List(metav1.ListOptions{})
-Expect(err).To(BeNil())
-Expect(len(list.Items)).To(BeIdenticalTo(2))
-Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
-(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(BeTrue())
+gomega.Expect(err).To(gomega.BeNil())
+gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2))
+gomega.Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
+(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(gomega.BeTrue())
 verifyV1Object(f, crd, &list.Items[0])
 verifyV1Object(f, crd, &list.Items[1])

-By("List CRs in v2")
+ginkgo.By("List CRs in v2")
 list, err = customResourceClients["v2"].List(metav1.ListOptions{})
-Expect(err).To(BeNil())
-Expect(len(list.Items)).To(BeIdenticalTo(2))
-Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
-(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(BeTrue())
+gomega.Expect(err).To(gomega.BeNil())
+gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2))
+gomega.Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
+(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(gomega.BeTrue())
 verifyV2Object(f, crd, &list.Items[0])
 verifyV2Object(f, crd, &list.Items[1])
 }
@@ -26,7 +26,7 @@ import (
 "time"

 "github.com/go-openapi/spec"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"

 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
@@ -52,11 +52,11 @@ var (
 var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublishOpenAPI]", func() {
 f := framework.NewDefaultFramework("crd-publish-openapi")

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessServerVersionGTE(crdPublishOpenAPIVersion, f.ClientSet.Discovery())
 })

-It("works for CRD with validation schema", func() {
+ginkgo.It("works for CRD with validation schema", func() {
 crd, err := setupCRD(f, schemaFoo, "foo", "v1")
 if err != nil {
 framework.Failf("%v", err)
@@ -65,7 +65,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 meta := fmt.Sprintf(metaPattern, crd.Kind, crd.APIGroup, crd.Versions[0].Name, "test-foo")
 ns := fmt.Sprintf("--namespace=%v", f.Namespace.Name)

-By("client-side validation (kubectl create and apply) allows request with known and required properties")
+ginkgo.By("client-side validation (kubectl create and apply) allows request with known and required properties")
 validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
 if _, err := framework.RunKubectlInput(validCR, ns, "create", "-f", "-"); err != nil {
 framework.Failf("failed to create valid CR %s: %v", validCR, err)
@@ -80,7 +80,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 framework.Failf("failed to delete valid CR: %v", err)
 }

-By("client-side validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema")
+ginkgo.By("client-side validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema")
 unknownCR := fmt.Sprintf(`{%s,"spec":{"foo":true}}`, meta)
 if _, err := framework.RunKubectlInput(unknownCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) {
 framework.Failf("unexpected no error when creating CR with unknown field: %v", err)
@@ -89,7 +89,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 framework.Failf("unexpected no error when applying CR with unknown field: %v", err)
 }

-By("client-side validation (kubectl create and apply) rejects request without required properties")
+ginkgo.By("client-side validation (kubectl create and apply) rejects request without required properties")
 noRequireCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"age":"10"}]}}`, meta)
 if _, err := framework.RunKubectlInput(noRequireCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) {
 framework.Failf("unexpected no error when creating CR without required field: %v", err)
@@ -98,12 +98,12 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 framework.Failf("unexpected no error when applying CR without required field: %v", err)
 }

-By("kubectl explain works to explain CR properties")
+ginkgo.By("kubectl explain works to explain CR properties")
 if err := verifyKubectlExplain(crd.GetPluralName(), `(?s)DESCRIPTION:.*Foo CRD for Testing.*FIELDS:.*apiVersion.*<string>.*APIVersion defines.*spec.*<Object>.*Specification of Foo`); err != nil {
 framework.Failf("%v", err)
 }

-By("kubectl explain works to explain CR properties recursively")
+ginkgo.By("kubectl explain works to explain CR properties recursively")
 if err := verifyKubectlExplain(crd.GetPluralName()+".metadata", `(?s)DESCRIPTION:.*Standard object's metadata.*FIELDS:.*creationTimestamp.*<string>.*CreationTimestamp is a timestamp`); err != nil {
 framework.Failf("%v", err)
 }
@@ -114,7 +114,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 framework.Failf("%v", err)
 }

-By("kubectl explain works to return error when explain is called on property that doesn't exist")
+ginkgo.By("kubectl explain works to return error when explain is called on property that doesn't exist")
 if _, err := framework.RunKubectl("explain", crd.GetPluralName()+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) {
 framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err)
 }
@@ -124,7 +124,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 }
 })

-It("works for CRD without validation schema", func() {
+ginkgo.It("works for CRD without validation schema", func() {
 crd, err := setupCRD(f, nil, "empty", "v1")
 if err != nil {
 framework.Failf("%v", err)
@@ -133,7 +133,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 meta := fmt.Sprintf(metaPattern, crd.Kind, crd.APIGroup, crd.Versions[0].Name, "test-cr")
 ns := fmt.Sprintf("--namespace=%v", f.Namespace.Name)

-By("client-side validation (kubectl create and apply) allows request with any unknown properties")
+ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties")
 randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
 if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil {
 framework.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err)
@@ -148,7 +148,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 framework.Failf("failed to delete random CR: %v", err)
 }

-By("kubectl explain works to explain CR without validation schema")
+ginkgo.By("kubectl explain works to explain CR without validation schema")
 if err := verifyKubectlExplain(crd.GetPluralName(), `(?s)DESCRIPTION:.*<empty>`); err != nil {
 framework.Failf("%v", err)
 }
@@ -158,8 +158,8 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 }
 })

-It("works for multiple CRDs of different groups", func() {
-By("CRs in different groups (two CRDs) show up in OpenAPI documentation")
+ginkgo.It("works for multiple CRDs of different groups", func() {
+ginkgo.By("CRs in different groups (two CRDs) show up in OpenAPI documentation")
 crdFoo, err := setupCRD(f, schemaFoo, "foo", "v1")
 if err != nil {
 framework.Failf("%v", err)
@@ -185,8 +185,8 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 }
 })

-It("works for multiple CRDs of same group but different versions", func() {
-By("CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation")
+ginkgo.It("works for multiple CRDs of same group but different versions", func() {
+ginkgo.By("CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation")
 crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3")
 if err != nil {
 framework.Failf("%v", err)
@@ -201,7 +201,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 framework.Failf("%v", err)
 }

-By("CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation")
+ginkgo.By("CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation")
 crdFoo, err := setupCRD(f, schemaFoo, "common-group", "v4")
 if err != nil {
 framework.Failf("%v", err)
@@ -227,8 +227,8 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 }
 })

-It("works for multiple CRDs of same group and version but different kinds", func() {
-By("CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation")
+ginkgo.It("works for multiple CRDs of same group and version but different kinds", func() {
+ginkgo.By("CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation")
 crdFoo, err := setupCRD(f, schemaFoo, "common-group", "v6")
 if err != nil {
 framework.Failf("%v", err)
@@ -254,8 +254,8 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 }
 })

-It("updates the published spec when one versin gets renamed", func() {
-By("set up a multi version CRD")
+ginkgo.It("updates the published spec when one versin gets renamed", func() {
+ginkgo.By("set up a multi version CRD")
 crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3")
 if err != nil {
 framework.Failf("%v", err)
@@ -267,22 +267,22 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 framework.Failf("%v", err)
 }

-By("rename a version")
+ginkgo.By("rename a version")
 patch := []byte(`{"spec":{"versions":[{"name":"v2","served":true,"storage":true},{"name":"v4","served":true,"storage":false}]}}`)
 crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(crdMultiVer.GetMetaName(), types.MergePatchType, patch)
 if err != nil {
 framework.Failf("%v", err)
 }

-By("check the new version name is served")
+ginkgo.By("check the new version name is served")
 if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v4"), schemaFoo); err != nil {
 framework.Failf("%v", err)
 }
-By("check the old version name is removed")
+ginkgo.By("check the old version name is removed")
 if err := waitForDefinitionCleanup(f.ClientSet, definitionName(crdMultiVer, "v3")); err != nil {
 framework.Failf("%v", err)
 }
-By("check the other version is not changed")
+ginkgo.By("check the other version is not changed")
 if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil {
 framework.Failf("%v", err)
 }
@@ -295,8 +295,8 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 }
 })

-It("removes definition from spec when one versin gets changed to not be served", func() {
-By("set up a multi version CRD")
+ginkgo.It("removes definition from spec when one versin gets changed to not be served", func() {
+ginkgo.By("set up a multi version CRD")
 crd, err := setupCRD(f, schemaFoo, "multi-to-single-ver", "v5", "v6alpha1")
 if err != nil {
 framework.Failf("%v", err)
@@ -309,18 +309,18 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
 framework.Failf("%v", err)
 }

-By("mark a version not serverd")
+ginkgo.By("mark a version not serverd")
 crd.Crd.Spec.Versions[1].Served = false
 crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd.Crd)
 if err != nil {
 framework.Failf("%v", err)
 }

-By("check the unserved version gets removed")
+ginkgo.By("check the unserved version gets removed")
 if err := waitForDefinitionCleanup(f.ClientSet, definitionName(crd, "v6alpha1")); err != nil {
 framework.Failf("%v", err)
 }
-By("check the other version is not changed")
+ginkgo.By("check the other version is not changed")
 if err := waitForDefinition(f.ClientSet, definitionName(crd, "v5"), schemaFoo); err != nil {
 framework.Failf("%v", err)
 }
@@ -30,21 +30,21 @@ import (
 "k8s.io/client-go/dynamic"
 "k8s.io/kubernetes/test/e2e/framework"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )

 var _ = SIGDescribe("CustomResourceDefinition Watch", func() {

 f := framework.NewDefaultFramework("crd-watch")

-Context("CustomResourceDefinition Watch", func() {
+ginkgo.Context("CustomResourceDefinition Watch", func() {
 /*
 Testname: crd-watch
 Description: Create a Custom Resource Definition and make sure
 watches observe events on create/delete.
 */
-It("watch on custom resource definition objects", func() {
+ginkgo.It("watch on custom resource definition objects", func() {

 framework.SkipUnlessServerVersionGTE(crdVersion, f.ClientSet.Discovery())

@@ -80,35 +80,35 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
 noxuResourceClient := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)

 watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA)
-Expect(err).NotTo(HaveOccurred(), "failed to watch custom resource: %s", watchCRNameA)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to watch custom resource: %s", watchCRNameA)

 watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
-Expect(err).NotTo(HaveOccurred(), "failed to watch custom resource: %s", watchCRNameB)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to watch custom resource: %s", watchCRNameB)

 testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
 testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)

-By("Creating first CR ")
+ginkgo.By("Creating first CR ")
 testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
-Expect(err).NotTo(HaveOccurred(), "failed to instantiate custom resource: %+v", testCrA)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to instantiate custom resource: %+v", testCrA)
 expectEvent(watchA, watch.Added, testCrA)
 expectNoEvent(watchB, watch.Added, testCrA)

-By("Creating second CR")
+ginkgo.By("Creating second CR")
 testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition)
-Expect(err).NotTo(HaveOccurred(), "failed to instantiate custom resource: %+v", testCrB)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to instantiate custom resource: %+v", testCrB)
 expectEvent(watchB, watch.Added, testCrB)
 expectNoEvent(watchA, watch.Added, testCrB)

-By("Deleting first CR")
+ginkgo.By("Deleting first CR")
 err = deleteCustomResource(noxuResourceClient, watchCRNameA)
-Expect(err).NotTo(HaveOccurred(), "failed to delete custom resource: %s", watchCRNameA)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete custom resource: %s", watchCRNameA)
 expectEvent(watchA, watch.Deleted, nil)
 expectNoEvent(watchB, watch.Deleted, nil)

-By("Deleting second CR")
+ginkgo.By("Deleting second CR")
 err = deleteCustomResource(noxuResourceClient, watchCRNameB)
-Expect(err).NotTo(HaveOccurred(), "failed to delete custom resource: %s", watchCRNameB)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete custom resource: %s", watchCRNameB)
 expectEvent(watchB, watch.Deleted, nil)
 expectNoEvent(watchA, watch.Deleted, nil)
 })
@@ -159,8 +159,7 @@ func newNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd

 if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped {
 return client.Resource(gvr).Namespace(ns)
-} else {
-return client.Resource(gvr)
 }
+return client.Resource(gvr)

 }
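The tail of that hunk is golint's advice to drop an else when the if branch ends in a return, outdenting the alternative. The same shape in isolation (hypothetical function):

    package example

    // Before: golint reports "if block ends with a return statement, so drop
    // this else and outdent its block".
    func pick(cond bool, a, b int) int {
        if cond {
            return a
        } else {
            return b
        }
    }

    // After: identical behavior, flatter control flow.
    func pickFixed(cond bool, a, b int) int {
        if cond {
            return a
        }
        return b
    }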
@@ -23,7 +23,7 @@ import (
 utilversion "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/kubernetes/test/e2e/framework"

-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 )

 var crdVersion = utilversion.MustParseSemantic("v1.7.0")
@@ -32,7 +32,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {

 f := framework.NewDefaultFramework("custom-resource-definition")

-Context("Simple CustomResourceDefinition", func() {
+ginkgo.Context("Simple CustomResourceDefinition", func() {
 /*
 Release : v1.9
 Testname: Custom Resource Definition, create
@@ -22,7 +22,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/utils/crd"

-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 )

 var storageVersionServerVersion = utilversion.MustParseSemantic("v1.13.99")
@@ -31,16 +31,16 @@ var _ = SIGDescribe("Discovery", func() {

 var namespaceName string

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 namespaceName = f.Namespace.Name

 framework.SkipUnlessServerVersionGTE(storageVersionServerVersion, f.ClientSet.Discovery())

-By("Setting up server cert")
+ginkgo.By("Setting up server cert")
 setupServerCert(namespaceName, serviceName)
 })

-It("[Feature:StorageVersionHash] Custom resource should have storage version hash", func() {
+ginkgo.It("[Feature:StorageVersionHash] Custom resource should have storage version hash", func() {
 testcrd, err := crd.CreateTestCRD(f)
 if err != nil {
 return
@@ -28,15 +28,15 @@ import (
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )

 var _ = SIGDescribe("Etcd failure [Disruptive]", func() {

 f := framework.NewDefaultFramework("etcd-failure")

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 // This test requires:
 // - SSH
 // - master access
@@ -44,16 +44,16 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
 // providers that provide those capabilities.
 framework.SkipUnlessProviderIs("gce")

-Expect(framework.RunRC(testutils.RCConfig{
+gomega.Expect(framework.RunRC(testutils.RCConfig{
 Client: f.ClientSet,
 Name: "baz",
 Namespace: f.Namespace.Name,
 Image: imageutils.GetPauseImageName(),
 Replicas: 1,
-})).NotTo(HaveOccurred())
+})).NotTo(gomega.HaveOccurred())
 })

-It("should recover from network partition with master", func() {
+ginkgo.It("should recover from network partition with master", func() {
 etcdFailTest(
 f,
 "sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
@@ -61,7 +61,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
 )
 })

-It("should recover from SIGKILL", func() {
+ginkgo.It("should recover from SIGKILL", func() {
 etcdFailTest(
 f,
 "pgrep etcd | xargs -I {} sudo kill -9 {}",
@@ -86,7 +86,7 @@ func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
 const etcdFailureDuration = 20 * time.Second

 func doEtcdFailure(failCommand, fixCommand string) {
-By("failing etcd")
+ginkgo.By("failing etcd")

 masterExec(failCommand)
 time.Sleep(etcdFailureDuration)
@@ -96,7 +96,7 @@ func doEtcdFailure(failCommand, fixCommand string) {
 func masterExec(cmd string) {
 host := framework.GetMasterHost() + ":22"
 result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
-Expect(err).NotTo(HaveOccurred(), "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
 if result.Code != 0 {
 framework.LogSSHResult(result)
 framework.Failf("master exec command returned non-zero")
@@ -104,11 +104,11 @@ func masterExec(cmd string) {
 }

 func checkExistingRCRecovers(f *framework.Framework) {
-By("assert that the pre-existing replication controller recovers")
+ginkgo.By("assert that the pre-existing replication controller recovers")
 podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
 rcSelector := labels.Set{"name": "baz"}.AsSelector()

-By("deleting pods from existing replication controller")
+ginkgo.By("deleting pods from existing replication controller")
 framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
 options := metav1.ListOptions{LabelSelector: rcSelector.String()}
 pods, err := podClient.List(options)
@@ -121,17 +121,17 @@ func checkExistingRCRecovers(f *framework.Framework) {
 }
 for _, pod := range pods.Items {
 err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
-Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
 }
 framework.Logf("apiserver has recovered")
 return true, nil
 }))

-By("waiting for replication controller to recover")
+ginkgo.By("waiting for replication controller to recover")
 framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
 options := metav1.ListOptions{LabelSelector: rcSelector.String()}
 pods, err := podClient.List(options)
-Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
 for _, pod := range pods.Items {
 if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
 return true, nil
@@ -18,6 +18,7 @@ package apimachinery

 import "github.com/onsi/ginkgo"

+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 return ginkgo.Describe("[sig-api-machinery] "+text, body)
 }
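The one-line framework.go hunk above satisfies the golint rule that every exported identifier carry a doc comment beginning with its own name, which is exactly the form the added SIGDescribe comment takes. Hypothetical example of the rule:

    package example

    // MaxRetries is the number of times a flaky operation is retried.
    // Without a comment starting with "MaxRetries", golint would report
    // "exported var MaxRetries should have comment or be unexported".
    var MaxRetries = 3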
@@ -40,8 +40,8 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/metrics"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -89,8 +89,10 @@ func getOrphanOptions() *metav1.DeleteOptions {
 }

 var (
-zero = int64(0)
-lablecount = int64(0)
+zero = int64(0)
+lablecount = int64(0)
+
+// CronJobGroupVersionResource unambiguously identifies a CronJob resource.
 CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv1beta1.GroupName, Version: "v1beta1", Resource: "cronjobs"}
 )

@@ -182,7 +184,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
 }
 if len(pods.Items) != num {
 ret = false
-By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items)))
+ginkgo.By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items)))
 }
 case "Deployments":
 deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
@@ -191,7 +193,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
 }
 if len(deployments.Items) != num {
 ret = false
-By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items)))
+ginkgo.By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items)))
 }
 case "ReplicaSets":
 rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
@@ -200,7 +202,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
 }
 if len(rs.Items) != num {
 ret = false
-By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items)))
+ginkgo.By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items)))
 }
 case "ReplicationControllers":
 rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(metav1.ListOptions{})
@@ -209,7 +211,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
 }
 if len(rcs.Items) != num {
 ret = false
-By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items)))
+ginkgo.By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items)))
 }
 case "CronJobs":
 cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
@@ -218,7 +220,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
 }
 if len(cronJobs.Items) != num {
 ret = false
-By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items)))
+ginkgo.By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items)))
 }
 case "Jobs":
 jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
@@ -227,7 +229,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
 }
 if len(jobs.Items) != num {
 ret = false
-By(fmt.Sprintf("expected %d jobs, got %d jobs", num, len(jobs.Items)))
+ginkgo.By(fmt.Sprintf("expected %d jobs, got %d jobs", num, len(jobs.Items)))
 }
 default:
 return false, fmt.Errorf("object %s is not supported", object)
@@ -238,7 +240,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
 }

 func gatherMetrics(f *framework.Framework) {
-By("Gathering metrics")
+ginkgo.By("Gathering metrics")
 var summary framework.TestDataSummary
 grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, false, false, true, false, false)
 if err != nil {
@@ -312,7 +314,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 rcName := "simpletest.rc"
 uniqLabels := getUniqLabel("gctest", "delete_pods")
 rc := newOwnerRC(f, rcName, 2, uniqLabels)
-By("create the rc")
+ginkgo.By("create the rc")
 rc, err := rcClient.Create(rc)
 if err != nil {
 framework.Failf("Failed to create replication controller: %v", err)
@@ -329,19 +331,19 @@ var _ = SIGDescribe("Garbage collector", func() {
 // stasis.
 if len(pods.Items) > 0 {
 return true, nil
-} else {
-return false, nil
 }
+return false, nil
+
 }); err != nil {
 framework.Failf("failed to wait for the rc to create some pods: %v", err)
 }
-By("delete the rc")
+ginkgo.By("delete the rc")
 deleteOptions := getBackgroundOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
 if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
 framework.Failf("failed to delete the rc: %v", err)
 }
-By("wait for all pods to be garbage collected")
+ginkgo.By("wait for all pods to be garbage collected")
 // wait for the RCs and Pods to reach the expected numbers.
 if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
 objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
@@ -370,7 +372,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 rcName := "simpletest.rc"
 uniqLabels := getUniqLabel("gctest", "orphan_pods")
 rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels)
-By("create the rc")
+ginkgo.By("create the rc")
 rc, err := rcClient.Create(rc)
 if err != nil {
 framework.Failf("Failed to create replication controller: %v", err)
@@ -383,19 +385,19 @@ var _ = SIGDescribe("Garbage collector", func() {
 }
 if rc.Status.Replicas == *rc.Spec.Replicas {
 return true, nil
-} else {
-return false, nil
 }
+return false, nil
+
 }); err != nil {
 framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
 }
-By("delete the rc")
+ginkgo.By("delete the rc")
 deleteOptions := getOrphanOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
 if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
 framework.Failf("failed to delete the rc: %v", err)
 }
-By("wait for the rc to be deleted")
+ginkgo.By("wait for the rc to be deleted")
 // Orphaning the 100 pods takes 100 PATCH operations. The default qps of
 // a client is 5. If the qps is saturated, it will take 20s to orphan
 // the pods. However, apiserver takes hundreds of ms to finish one
@@ -415,7 +417,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 }); err != nil {
 framework.Failf("%v", err)
 }
-By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
+ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
 time.Sleep(30 * time.Second)
 pods, err := podClient.List(metav1.ListOptions{})
 if err != nil {
@@ -429,14 +431,14 @@ var _ = SIGDescribe("Garbage collector", func() {

 // deleteOptions.OrphanDependents is deprecated in 1.7 and preferred to use the PropagationPolicy.
 // Discussion is tracked under https://github.com/kubernetes/kubernetes/issues/65427 to promote for conformance in future.
-It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
+ginkgo.It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
 clientSet := f.ClientSet
 rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
 podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
 rcName := "simpletest.rc"
 uniqLabels := getUniqLabel("gctest", "orphan_pods_nil_option")
 rc := newOwnerRC(f, rcName, 2, uniqLabels)
-By("create the rc")
+ginkgo.By("create the rc")
 rc, err := rcClient.Create(rc)
 if err != nil {
 framework.Failf("Failed to create replication controller: %v", err)
@@ -449,19 +451,18 @@ var _ = SIGDescribe("Garbage collector", func() {
 }
 if rc.Status.Replicas == *rc.Spec.Replicas {
 return true, nil
-} else {
-return false, nil
 }
+return false, nil
 }); err != nil {
 framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
 }
-By("delete the rc")
+ginkgo.By("delete the rc")
 deleteOptions := &metav1.DeleteOptions{}
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
 if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
 framework.Failf("failed to delete the rc: %v", err)
 }
-By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
+ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
 time.Sleep(30 * time.Second)
 pods, err := podClient.List(metav1.ListOptions{})
 if err != nil {
@@ -485,13 +486,13 @@ var _ = SIGDescribe("Garbage collector", func() {
 deploymentName := "simpletest.deployment"
 uniqLabels := getUniqLabel("gctest", "delete_rs")
 deployment := newOwnerDeployment(f, deploymentName, uniqLabels)
-By("create the deployment")
+ginkgo.By("create the deployment")
 createdDeployment, err := deployClient.Create(deployment)
 if err != nil {
 framework.Failf("Failed to create deployment: %v", err)
 }
 // wait for deployment to create some rs
-By("Wait for the Deployment to create new ReplicaSet")
+ginkgo.By("Wait for the Deployment to create new ReplicaSet")
 err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
 rsList, err := rsClient.List(metav1.ListOptions{})
 if err != nil {
@@ -504,13 +505,13 @@ var _ = SIGDescribe("Garbage collector", func() {
 framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
 }

-By("delete the deployment")
+ginkgo.By("delete the deployment")
 deleteOptions := getBackgroundOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
 if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
 framework.Failf("failed to delete the deployment: %v", err)
 }
-By("wait for all rs to be garbage collected")
+ginkgo.By("wait for all rs to be garbage collected")
 err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
 objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0}
 return verifyRemainingObjects(f, objects)
@@ -544,13 +545,13 @@ var _ = SIGDescribe("Garbage collector", func() {
 deploymentName := "simpletest.deployment"
 uniqLabels := getUniqLabel("gctest", "orphan_rs")
 deployment := newOwnerDeployment(f, deploymentName, uniqLabels)
-By("create the deployment")
+ginkgo.By("create the deployment")
 createdDeployment, err := deployClient.Create(deployment)
 if err != nil {
 framework.Failf("Failed to create deployment: %v", err)
 }
 // wait for deployment to create some rs
-By("Wait for the Deployment to create new ReplicaSet")
+ginkgo.By("Wait for the Deployment to create new ReplicaSet")
 err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
 rsList, err := rsClient.List(metav1.ListOptions{})
 if err != nil {
@ -563,13 +564,13 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
|
||||
}
|
||||
|
||||
By("delete the deployment")
|
||||
ginkgo.By("delete the deployment")
|
||||
deleteOptions := getOrphanOptions()
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
|
||||
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the deployment: %v", err)
|
||||
}
|
||||
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
|
||||
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
|
||||
time.Sleep(30 * time.Second)
|
||||
objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2}
|
||||
ok, err := verifyRemainingObjects(f, objects)
|
||||
|
@ -618,7 +619,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
rcName := "simpletest.rc"
|
||||
uniqLabels := getUniqLabel("gctest", "delete_pods_foreground")
|
||||
rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels)
|
||||
By("create the rc")
|
||||
ginkgo.By("create the rc")
|
||||
rc, err := rcClient.Create(rc)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create replication controller: %v", err)
|
||||
|
@ -631,19 +632,18 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
}
|
||||
if rc.Status.Replicas == *rc.Spec.Replicas {
|
||||
return true, nil
|
||||
} else {
|
||||
return false, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
|
||||
}
|
||||
By("delete the rc")
|
||||
ginkgo.By("delete the rc")
|
||||
deleteOptions := getForegroundOptions()
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
|
||||
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the rc: %v", err)
|
||||
}
|
||||
By("wait for the rc to be deleted")
|
||||
ginkgo.By("wait for the rc to be deleted")
|
||||
// default client QPS is 20, deleting each pod requires 2 requests, so 30s should be enough
|
||||
// TODO: 30s is enough assuming immediate processing of dependents following
|
||||
// owner deletion, but in practice there can be a long delay between owner
|
||||
|
@ -663,13 +663,11 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
framework.Logf("%d pods has nil DeletionTimestamp", count)
|
||||
framework.Logf("")
|
||||
return false, nil
|
||||
} else {
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
} else {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}); err != nil {
|
||||
pods, err2 := podClient.List(metav1.ListOptions{})
|
||||
if err2 != nil {
|
||||
|
@ -706,17 +704,17 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
rc1Name := "simpletest-rc-to-be-deleted"
|
||||
replicas := int32(estimateMaximumPods(clientSet, 10, 100))
|
||||
halfReplicas := int(replicas / 2)
|
||||
uniqLabels_deleted := getUniqLabel("gctest_d", "valid_and_pending_owners_d")
|
||||
rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabels_deleted)
|
||||
By("create the rc1")
|
||||
uniqLabelsDeleted := getUniqLabel("gctest_d", "valid_and_pending_owners_d")
|
||||
rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabelsDeleted)
|
||||
ginkgo.By("create the rc1")
|
||||
rc1, err := rcClient.Create(rc1)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create replication controller: %v", err)
|
||||
}
|
||||
rc2Name := "simpletest-rc-to-stay"
|
||||
uniqLabels_stay := getUniqLabel("gctest_s", "valid_and_pending_owners_s")
|
||||
rc2 := newOwnerRC(f, rc2Name, 0, uniqLabels_stay)
|
||||
By("create the rc2")
|
||||
uniqLabelsStay := getUniqLabel("gctest_s", "valid_and_pending_owners_s")
|
||||
rc2 := newOwnerRC(f, rc2Name, 0, uniqLabelsStay)
|
||||
ginkgo.By("create the rc2")
|
||||
rc2, err = rcClient.Create(rc2)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create replication controller: %v", err)
|
||||
|
@ -729,29 +727,28 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
}
|
||||
if rc1.Status.Replicas == *rc1.Spec.Replicas {
|
||||
return true, nil
|
||||
} else {
|
||||
return false, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
|
||||
}
|
||||
By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
|
||||
ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", f.Namespace.Name)
|
||||
patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID)
|
||||
for i := 0; i < halfReplicas; i++ {
|
||||
pod := pods.Items[i]
|
||||
_, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("delete the rc %s", rc1Name))
|
||||
ginkgo.By(fmt.Sprintf("delete the rc %s", rc1Name))
|
||||
deleteOptions := getForegroundOptions()
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc1.UID))
|
||||
if err := rcClient.Delete(rc1.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the rc: %v", err)
|
||||
}
|
||||
By("wait for the rc to be deleted")
|
||||
ginkgo.By("wait for the rc to be deleted")
|
||||
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
|
||||
// Tracked at https://github.com/kubernetes/kubernetes/issues/50046.
|
||||
if err := wait.Poll(5*time.Second, 90*time.Second, func() (bool, error) {
|
||||
|
@ -768,13 +765,11 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
framework.Logf("%d pods has nil DeletionTimestamp", count)
|
||||
framework.Logf("")
|
||||
return false, nil
|
||||
} else {
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
} else {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}); err != nil {
|
||||
pods, err2 := podClient.List(metav1.ListOptions{})
|
||||
if err2 != nil {
|
||||
|
@ -819,36 +814,36 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
pod1Name := "pod1"
|
||||
pod1 := newGCPod(pod1Name)
|
||||
pod1, err := podClient.Create(pod1)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
|
||||
pod2Name := "pod2"
|
||||
pod2 := newGCPod(pod2Name)
|
||||
pod2, err = podClient.Create(pod2)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
|
||||
pod3Name := "pod3"
|
||||
pod3 := newGCPod(pod3Name)
|
||||
pod3, err = podClient.Create(pod3)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
|
||||
// create circular dependency
|
||||
addRefPatch := func(name string, uid types.UID) []byte {
|
||||
return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid))
|
||||
}
|
||||
patch1 := addRefPatch(pod3.Name, pod3.UID)
|
||||
pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
|
||||
framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
|
||||
patch2 := addRefPatch(pod1.Name, pod1.UID)
|
||||
pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
|
||||
framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
|
||||
patch3 := addRefPatch(pod2.Name, pod2.UID)
|
||||
pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
|
||||
framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
|
||||
// delete one pod, should result in the deletion of all pods
|
||||
deleteOptions := getForegroundOptions()
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
|
||||
err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
|
||||
var pods *v1.PodList
|
||||
var err2 error
|
||||
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
|
||||
|
@ -860,16 +855,15 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
}
|
||||
if len(pods.Items) == 0 {
|
||||
return true, nil
|
||||
} else {
|
||||
return false, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
framework.Logf("pods are %#v", pods.Items)
|
||||
framework.Failf("failed to wait for all pods to be deleted: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
It("should support cascading deletion of custom resources", func() {
|
||||
ginkgo.It("should support cascading deletion of custom resources", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
framework.Failf("failed to load config: %v", err)
|
||||
|
@ -970,7 +964,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
}
|
||||
})
|
||||
|
||||
It("should support orphan deletion of custom resources", func() {
|
||||
ginkgo.It("should support orphan deletion of custom resources", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
framework.Failf("failed to load config: %v", err)
|
||||
|
@ -1049,7 +1043,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
|
||||
}
|
||||
|
||||
By("wait for the owner to be deleted")
|
||||
ginkgo.By("wait for the owner to be deleted")
|
||||
if err := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
|
||||
_, err = resourceClient.Get(ownerName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
|
@ -1064,7 +1058,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
}
|
||||
|
||||
// Wait 30s and ensure the dependent is not deleted.
|
||||
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the dependent crd")
|
||||
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the dependent crd")
|
||||
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
_, err := resourceClient.Get(dependentName, metav1.GetOptions{})
|
||||
return false, err
|
||||
|
@ -1073,15 +1067,15 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
}
|
||||
})
|
||||
|
||||
It("should delete jobs and pods created by cronjob", func() {
|
||||
ginkgo.It("should delete jobs and pods created by cronjob", func() {
|
||||
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResource, f.Namespace.Name)
|
||||
|
||||
By("Create the cronjob")
|
||||
ginkgo.By("Create the cronjob")
|
||||
cronJob := newCronJob("simple", "*/1 * * * ?")
|
||||
cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(cronJob)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)
|
||||
|
||||
By("Wait for the CronJob to create new Job")
|
||||
ginkgo.By("Wait for the CronJob to create new Job")
|
||||
err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
|
@ -1093,11 +1087,11 @@ var _ = SIGDescribe("Garbage collector", func() {
|
|||
framework.Failf("Failed to wait for the CronJob to create some Jobs: %v", err)
|
||||
}
|
||||
|
||||
By("Delete the cronjob")
|
||||
ginkgo.By("Delete the cronjob")
|
||||
if err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Delete(cronJob.Name, getBackgroundOptions()); err != nil {
|
||||
framework.Failf("Failed to delete the CronJob: %v", err)
|
||||
}
|
||||
By("Verify if cronjob does not leave jobs nor pods behind")
|
||||
ginkgo.By("Verify if cronjob does not leave jobs nor pods behind")
|
||||
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0}
|
||||
return verifyRemainingObjects(f, objects)
|
||||
|
|
|
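
The recurring rewrite in the garbage collector hunks above is golint's indent-error-flow fix: an if branch that ends in return should not be followed by an else, so the else body is outdented. A minimal, self-contained sketch of the pattern, isolated into a hypothetical helper rather than the commit's own code:

package main

import "fmt"

// replicasReady mirrors the poll-callback rewrite above: once the if
// branch returns, golint asks for the else to be dropped and its body
// outdented to the enclosing scope.
func replicasReady(status, spec int32) (bool, error) {
	if status == spec {
		return true, nil
	}
	return false, nil // was: } else { return false, nil }
}

func main() {
	ready, _ := replicasReady(2, 2)
	fmt.Println(ready)
}
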
@ -31,8 +31,8 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -150,21 +150,21 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool)

var _ = SIGDescribe("Generated clientset", func() {
f := framework.NewDefaultFramework("clientset")
It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() {
ginkgo.It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() {
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
By("constructing the pod")
ginkgo.By("constructing the pod")
name := "pod" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
podCopy := testingPod(name, value)
pod := &podCopy
By("setting up watch")
ginkgo.By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := metav1.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options)
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
Expect(len(pods.Items)).To(Equal(0))
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector,
ResourceVersion: pods.ListMeta.ResourceVersion,
@ -174,13 +174,13 @@ var _ = SIGDescribe("Generated clientset", func() {
framework.Failf("Failed to set up watch: %v", err)
}

By("creating the pod")
ginkgo.By("creating the pod")
pod, err = podClient.Create(pod)
if err != nil {
framework.Failf("Failed to create pod: %v", err)
}

By("verifying the pod is in kubernetes")
ginkgo.By("verifying the pod is in kubernetes")
options = metav1.ListOptions{
LabelSelector: selector,
ResourceVersion: pod.ResourceVersion,
@ -189,22 +189,22 @@ var _ = SIGDescribe("Generated clientset", func() {
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
Expect(len(pods.Items)).To(Equal(1))
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

By("verifying pod creation was observed")
ginkgo.By("verifying pod creation was observed")
observeCreation(w)

// We need to wait for the pod to be scheduled, otherwise the deletion
// will be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

By("deleting the pod gracefully")
ginkgo.By("deleting the pod gracefully")
gracePeriod := int64(31)
if err := podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
framework.Failf("Failed to delete pod: %v", err)
}

By("verifying the deletionTimestamp and deletionGracePeriodSeconds of the pod is set")
ginkgo.By("verifying the deletionTimestamp and deletionGracePeriodSeconds of the pod is set")
observerUpdate(w, func(obj runtime.Object) bool {
pod := obj.(*v1.Pod)
return pod.ObjectMeta.DeletionTimestamp != nil && *pod.ObjectMeta.DeletionGracePeriodSeconds == gracePeriod
@ -263,24 +263,24 @@ func newTestingCronJob(name string, value string) *batchv1beta1.CronJob {
var _ = SIGDescribe("Generated clientset", func() {
f := framework.NewDefaultFramework("clientset")

BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResource, f.Namespace.Name)
})

It("should create v1beta1 cronJobs, delete cronJobs, watch cronJobs", func() {
ginkgo.It("should create v1beta1 cronJobs, delete cronJobs, watch cronJobs", func() {
cronJobClient := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name)
By("constructing the cronJob")
ginkgo.By("constructing the cronJob")
name := "cronjob" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
cronJob := newTestingCronJob(name, value)
By("setting up watch")
ginkgo.By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := metav1.ListOptions{LabelSelector: selector}
cronJobs, err := cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
}
Expect(len(cronJobs.Items)).To(Equal(0))
gomega.Expect(len(cronJobs.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector,
ResourceVersion: cronJobs.ListMeta.ResourceVersion,
@ -290,13 +290,13 @@ var _ = SIGDescribe("Generated clientset", func() {
framework.Failf("Failed to set up watch: %v", err)
}

By("creating the cronJob")
ginkgo.By("creating the cronJob")
cronJob, err = cronJobClient.Create(cronJob)
if err != nil {
framework.Failf("Failed to create cronJob: %v", err)
}

By("verifying the cronJob is in kubernetes")
ginkgo.By("verifying the cronJob is in kubernetes")
options = metav1.ListOptions{
LabelSelector: selector,
ResourceVersion: cronJob.ResourceVersion,
@ -305,12 +305,12 @@ var _ = SIGDescribe("Generated clientset", func() {
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
}
Expect(len(cronJobs.Items)).To(Equal(1))
gomega.Expect(len(cronJobs.Items)).To(gomega.Equal(1))

By("verifying cronJob creation was observed")
ginkgo.By("verifying cronJob creation was observed")
observeCreation(w)

By("deleting the cronJob")
ginkgo.By("deleting the cronJob")
// Use DeletePropagationBackground so the CronJob is really gone when the call returns.
propagationPolicy := metav1.DeletePropagationBackground
if err := cronJobClient.Delete(cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
@ -322,6 +322,6 @@ var _ = SIGDescribe("Generated clientset", func() {
if err != nil {
framework.Failf("Failed to list cronJobs to verify deletion: %v", err)
}
Expect(len(cronJobs.Items)).To(Equal(0))
gomega.Expect(len(cronJobs.Items)).To(gomega.Equal(0))
})
})
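
Both files above make the same import change: the dot-imports of ginkgo and gomega become qualified imports, so every It, By, BeforeEach, and Expect gains an explicit package prefix. A minimal sketch of how a spec reads afterwards, assuming only the standard ginkgo v1 and gomega APIs (the spec text and package name are illustrative, not from the commit):

package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// With qualified imports every DSL call names its package explicitly,
// which is what golint's dot-import check asks for.
var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.BeforeEach(func() {
		ginkgo.By("setting up")
	})

	ginkgo.It("asserts with qualified gomega helpers", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})
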
@ -31,36 +31,36 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
var err error

By("Creating testing namespaces")
ginkgo.By("Creating testing namespaces")
wg := &sync.WaitGroup{}
wg.Add(totalNS)
for n := 0; n < totalNS; n += 1 {
for n := 0; n < totalNS; n++ {
go func(n int) {
defer wg.Done()
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
ns := fmt.Sprintf("nslifetest-%v", n)
_, err = f.CreateNamespace(ns, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", ns)
}(n)
}
wg.Wait()

//Wait 10 seconds, then SEND delete requests for all the namespaces.
By("Waiting 10 seconds")
ginkgo.By("Waiting 10 seconds")
time.Sleep(time.Duration(10 * time.Second))
deleteFilter := []string{"nslifetest"}
deleted, err := framework.DeleteNamespaces(f.ClientSet, deleteFilter, nil /* skipFilter */)
Expect(err).NotTo(HaveOccurred(), "failed to delete namespace(s) containing: %s", deleteFilter)
Expect(len(deleted)).To(Equal(totalNS))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace(s) containing: %s", deleteFilter)
gomega.Expect(len(deleted)).To(gomega.Equal(totalNS))

By("Waiting for namespaces to vanish")
ginkgo.By("Waiting for namespaces to vanish")
//Now POLL until all namespaces have been eradicated.
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
func() (bool, error) {
@ -95,21 +95,21 @@ func waitForPodInNamespace(c clientset.Interface, ns, podName string) *v1.Pod {
}
return true, nil
})
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
return pod
}

func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
By("Creating a test namespace")
ginkgo.By("Creating a test namespace")
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)

By("Waiting for a default service account to be provisioned in namespace")
ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)

By("Creating a pod in the namespace")
ginkgo.By("Creating a pod in the namespace")
podName := "test-pod"
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -125,16 +125,16 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, namespace.Name)

By("Waiting for the pod to have running status")
ginkgo.By("Waiting for the pod to have running status")
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))

By("Deleting the namespace")
ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace: %s", namespace.Name)

By("Waiting for the namespace to be removed.")
ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
@ -145,28 +145,28 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
return false, nil
}))

By("Recreating the namespace")
ginkgo.By("Recreating the namespace")
namespace, err = f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)

By("Verifying there are no pods in the namespace")
ginkgo.By("Verifying there are no pods in the namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred(), "failed to get pod %s in namespace: %s", pod.Name, namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "failed to get pod %s in namespace: %s", pod.Name, namespace.Name)
}

func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
var err error

By("Creating a test namespace")
ginkgo.By("Creating a test namespace")
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)

By("Waiting for a default service account to be provisioned in namespace")
ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)

By("Creating a service in the namespace")
ginkgo.By("Creating a service in the namespace")
serviceName := "test-service"
labels := map[string]string{
"foo": "bar",
@ -185,13 +185,13 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
Expect(err).NotTo(HaveOccurred(), "failed to create service %s in namespace %s", serviceName, namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service %s in namespace %s", serviceName, namespace.Name)

By("Deleting the namespace")
ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace: %s", namespace.Name)

By("Waiting for the namespace to be removed.")
ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60)
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
@ -202,13 +202,13 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
return false, nil
}))

By("Recreating the namespace")
ginkgo.By("Recreating the namespace")
namespace, err = f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)

By("Verifying there is no service in the namespace")
ginkgo.By("Verifying there is no service in the namespace")
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred(), "failed to get service %s in namespace: %s", service.Name, namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "failed to get service %s in namespace: %s", service.Name, namespace.Name)
}

// This test must run [Serial] due to the impact of running other parallel
@ -257,11 +257,11 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted",
func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) })

It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
ginkgo.It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
func() { extinguish(f, 100, 10, 150) })

// On hold until etcd3; see #7372
It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]",
ginkgo.It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]",
func() { extinguish(f, 100, 0, 150) })

})
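
The namespace diff adds two more mechanical fixes: n += 1 becomes n++, and the qualified ginkgo.GinkgoRecover() is deferred inside each goroutine so an assertion failure there is reported to the suite instead of crashing the process. A condensed, self-contained sketch of that loop after the change; createNamespace is a hypothetical stand-in for the framework call, not the commit's code:

package example

import (
	"fmt"
	"sync"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func createNamespace(name string) error { return nil } // stand-in for f.CreateNamespace

func createAll(totalNS int) {
	wg := &sync.WaitGroup{}
	wg.Add(totalNS)
	for n := 0; n < totalNS; n++ { // golint prefers n++ over n += 1
		go func(n int) {
			defer wg.Done()
			defer ginkgo.GinkgoRecover() // report failures from this goroutine to ginkgo
			err := createNamespace(fmt.Sprintf("nslifetest-%v", n))
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}(n)
	}
	wg.Wait()
}
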
File diff suppressed because it is too large

@ -22,8 +22,8 @@ import (
"fmt"
"text/tabwriter"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"

authorizationv1 "k8s.io/api/authorization/v1"
"k8s.io/api/core/v1"
@ -43,11 +43,11 @@ var serverPrintVersion = utilversion.MustParseSemantic("v1.10.0")
var _ = SIGDescribe("Servers with support for Table transformation", func() {
f := framework.NewDefaultFramework("tables")

BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessServerVersionGTE(serverPrintVersion, f.ClientSet.Discovery())
})

It("should return pod details", func() {
ginkgo.It("should return pod details", func() {
ns := f.Namespace.Name
c := f.ClientSet

@ -55,31 +55,31 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
framework.Logf("Creating pod %s", podName)

_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)

table := &metav1beta1.Table{}
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns)
framework.Logf("Table: %#v", table)

Expect(len(table.ColumnDefinitions)).To(BeNumerically(">", 2))
Expect(len(table.Rows)).To(Equal(1))
Expect(len(table.Rows[0].Cells)).To(Equal(len(table.ColumnDefinitions)))
Expect(table.ColumnDefinitions[0].Name).To(Equal("Name"))
Expect(table.Rows[0].Cells[0]).To(Equal(podName))
gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">", 2))
gomega.Expect(len(table.Rows)).To(gomega.Equal(1))
gomega.Expect(len(table.Rows[0].Cells)).To(gomega.Equal(len(table.ColumnDefinitions)))
gomega.Expect(table.ColumnDefinitions[0].Name).To(gomega.Equal("Name"))
gomega.Expect(table.Rows[0].Cells[0]).To(gomega.Equal(podName))

out := printTable(table)
Expect(out).To(MatchRegexp("^NAME\\s"))
Expect(out).To(MatchRegexp("\npod-1\\s"))
gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s"))
gomega.Expect(out).To(gomega.MatchRegexp("\npod-1\\s"))
framework.Logf("Table:\n%s", out)
})

It("should return chunks of table results for list calls", func() {
ginkgo.It("should return chunks of table results for list calls", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)

By("creating a large number of resources")
ginkgo.By("creating a large number of resources")
workqueue.ParallelizeUntil(context.TODO(), 5, 20, func(i int) {
for tries := 3; tries >= 0; tries-- {
_, err := client.Create(&v1.PodTemplate{
@ -99,7 +99,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
}
framework.Logf("Got an error creating template %d: %v", i, err)
}
Fail("Unable to create template %d, exiting", i)
ginkgo.Fail("Unable to create template %d, exiting", i)
})

pagedTable := &metav1beta1.Table{}
@ -107,44 +107,44 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
Expect(err).NotTo(HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
Expect(len(pagedTable.Rows)).To(Equal(2))
Expect(pagedTable.ResourceVersion).ToNot(Equal(""))
Expect(pagedTable.SelfLink).ToNot(Equal(""))
Expect(pagedTable.Continue).ToNot(Equal(""))
Expect(pagedTable.Rows[0].Cells[0]).To(Equal("template-0000"))
Expect(pagedTable.Rows[1].Cells[0]).To(Equal("template-0001"))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
gomega.Expect(len(pagedTable.Rows)).To(gomega.Equal(2))
gomega.Expect(pagedTable.ResourceVersion).ToNot(gomega.Equal(""))
gomega.Expect(pagedTable.SelfLink).ToNot(gomega.Equal(""))
gomega.Expect(pagedTable.Continue).ToNot(gomega.Equal(""))
gomega.Expect(pagedTable.Rows[0].Cells[0]).To(gomega.Equal("template-0000"))
gomega.Expect(pagedTable.Rows[1].Cells[0]).To(gomega.Equal("template-0001"))

err = c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates").
VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
Expect(err).NotTo(HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
Expect(len(pagedTable.Rows)).To(BeNumerically(">", 0))
Expect(pagedTable.Rows[0].Cells[0]).To(Equal("template-0002"))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
gomega.Expect(len(pagedTable.Rows)).To(gomega.BeNumerically(">", 0))
gomega.Expect(pagedTable.Rows[0].Cells[0]).To(gomega.Equal("template-0002"))
})

It("should return generic metadata details across all namespaces for nodes", func() {
ginkgo.It("should return generic metadata details across all namespaces for nodes", func() {
c := f.ClientSet

table := &metav1beta1.Table{}
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred(), "failed to get nodes in Table form across all namespaces")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get nodes in Table form across all namespaces")
framework.Logf("Table: %#v", table)

Expect(len(table.ColumnDefinitions)).To(BeNumerically(">=", 2))
Expect(len(table.Rows)).To(BeNumerically(">=", 1))
Expect(len(table.Rows[0].Cells)).To(Equal(len(table.ColumnDefinitions)))
Expect(table.ColumnDefinitions[0].Name).To(Equal("Name"))
Expect(table.ResourceVersion).ToNot(Equal(""))
Expect(table.SelfLink).ToNot(Equal(""))
gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">=", 2))
gomega.Expect(len(table.Rows)).To(gomega.BeNumerically(">=", 1))
gomega.Expect(len(table.Rows[0].Cells)).To(gomega.Equal(len(table.ColumnDefinitions)))
gomega.Expect(table.ColumnDefinitions[0].Name).To(gomega.Equal("Name"))
gomega.Expect(table.ResourceVersion).ToNot(gomega.Equal(""))
gomega.Expect(table.SelfLink).ToNot(gomega.Equal(""))

out := printTable(table)
Expect(out).To(MatchRegexp("^NAME\\s"))
gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s"))
framework.Logf("Table:\n%s", out)
})

It("should return a 406 for a backend which does not implement metadata", func() {
ginkgo.It("should return a 406 for a backend which does not implement metadata", func() {
c := f.ClientSet

table := &metav1beta1.Table{}
@ -157,8 +157,8 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
},
}
err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Body(sar).Do().Into(table)
Expect(err).To(HaveOccurred(), "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar)
Expect(err.(errors.APIStatus).Status().Code).To(Equal(int32(406)))
gomega.Expect(err).To(gomega.HaveOccurred(), "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar)
gomega.Expect(err.(errors.APIStatus).Status().Code).To(gomega.Equal(int32(406)))
})
})

@ -166,7 +166,7 @@ func printTable(table *metav1beta1.Table) string {
buf := &bytes.Buffer{}
tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0)
err := printers.PrintTable(table, tw, printers.PrintOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to print table: %+v", table)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to print table: %+v", table)
tw.Flush()
return buf.String()
}
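
The table specs above all drive the server-side printing endpoint the same way: they request the Table representation explicitly through the Accept header. A minimal sketch of that shared request shape, assuming an already-configured rest.Interface and the client-go vintage used here (where Do() takes no arguments):

package example

import (
	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
	"k8s.io/client-go/rest"
)

// fetchNodeTable asks the apiserver for nodes rendered as a Table, the
// same pattern the specs above use. A 406 from the server means the
// backend does not implement Table metadata, which is exactly what the
// last spec asserts.
func fetchNodeTable(restClient rest.Interface) (*metav1beta1.Table, error) {
	table := &metav1beta1.Table{}
	err := restClient.Get().
		Resource("nodes").
		SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
		Do().Into(table)
	return table, err
}
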
@ -28,8 +28,8 @@ import (
|
|||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -55,17 +55,17 @@ var _ = SIGDescribe("Watchers", func() {
|
|||
c := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
By("creating a watch on configmaps with label A")
|
||||
ginkgo.By("creating a watch on configmaps with label A")
|
||||
watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)
|
||||
|
||||
By("creating a watch on configmaps with label B")
|
||||
ginkgo.By("creating a watch on configmaps with label B")
|
||||
watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)
|
||||
|
||||
By("creating a watch on configmaps with label A or B")
|
||||
ginkgo.By("creating a watch on configmaps with label A or B")
|
||||
watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
|
||||
|
||||
testConfigMapA := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
@ -84,48 +84,48 @@ var _ = SIGDescribe("Watchers", func() {
|
|||
},
|
||||
}
|
||||
|
||||
By("creating a configmap with label A and ensuring the correct watchers observe the notification")
|
||||
ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification")
|
||||
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
|
||||
expectEvent(watchA, watch.Added, testConfigMapA)
|
||||
expectEvent(watchAB, watch.Added, testConfigMapA)
|
||||
expectNoEvent(watchB, watch.Added, testConfigMapA)
|
||||
|
||||
By("modifying configmap A and ensuring the correct watchers observe the notification")
|
||||
ginkgo.By("modifying configmap A and ensuring the correct watchers observe the notification")
|
||||
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
|
||||
expectEvent(watchA, watch.Modified, testConfigMapA)
|
||||
expectEvent(watchAB, watch.Modified, testConfigMapA)
|
||||
expectNoEvent(watchB, watch.Modified, testConfigMapA)
|
||||
|
||||
By("modifying configmap A again and ensuring the correct watchers observe the notification")
|
||||
ginkgo.By("modifying configmap A again and ensuring the correct watchers observe the notification")
|
||||
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
|
||||
expectEvent(watchA, watch.Modified, testConfigMapA)
|
||||
expectEvent(watchAB, watch.Modified, testConfigMapA)
|
||||
expectNoEvent(watchB, watch.Modified, testConfigMapA)
|
||||
|
||||
By("deleting configmap A and ensuring the correct watchers observe the notification")
|
||||
ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
|
||||
expectEvent(watchA, watch.Deleted, nil)
|
||||
expectEvent(watchAB, watch.Deleted, nil)
|
||||
expectNoEvent(watchB, watch.Deleted, nil)
|
||||
|
||||
By("creating a configmap with label B and ensuring the correct watchers observe the notification")
|
||||
ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification")
|
||||
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
|
||||
expectEvent(watchB, watch.Added, testConfigMapB)
|
||||
expectEvent(watchAB, watch.Added, testConfigMapB)
|
||||
expectNoEvent(watchA, watch.Added, testConfigMapB)
|
||||
|
||||
By("deleting configmap B and ensuring the correct watchers observe the notification")
|
||||
ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
|
||||
expectEvent(watchB, watch.Deleted, nil)
|
||||
expectEvent(watchAB, watch.Deleted, nil)
|
||||
expectNoEvent(watchA, watch.Deleted, nil)
|
||||
|
@ -149,31 +149,31 @@ var _ = SIGDescribe("Watchers", func() {
|
|||
},
|
||||
}
|
||||
|
||||
By("creating a new configmap")
|
||||
ginkgo.By("creating a new configmap")
|
||||
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)
|
||||
|
||||
By("modifying the configmap once")
|
||||
ginkgo.By("modifying the configmap once")
|
||||
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)
|
||||
|
||||
By("modifying the configmap a second time")
|
||||
ginkgo.By("modifying the configmap a second time")
|
||||
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)
|
||||
|
||||
By("deleting the configmap")
|
||||
ginkgo.By("deleting the configmap")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)
|
||||
|
||||
By("creating a watch on configmaps from the resource version returned by the first update")
|
||||
ginkgo.By("creating a watch on configmaps from the resource version returned by the first update")
|
||||
testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)
|
||||
|
||||
By("Expecting to observe notifications for all changes to the configmap after the first update")
|
||||
ginkgo.By("Expecting to observe notifications for all changes to the configmap after the first update")
|
||||
expectEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
|
||||
expectEvent(testWatch, watch.Deleted, nil)
|
||||
})
|
||||
|
@ -198,21 +198,21 @@ var _ = SIGDescribe("Watchers", func() {
|
|||
},
|
||||
}
|
||||
|
||||
By("creating a watch on configmaps")
|
||||
ginkgo.By("creating a watch on configmaps")
|
||||
testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)
|
||||
|
||||
By("creating a new configmap")
|
||||
ginkgo.By("creating a new configmap")
|
||||
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
|
||||
|
||||
By("modifying the configmap once")
|
||||
ginkgo.By("modifying the configmap once")
|
||||
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
|
||||
|
||||
By("closing the watch once it receives two notifications")
|
||||
ginkgo.By("closing the watch once it receives two notifications")
|
||||
expectEvent(testWatchBroken, watch.Added, testConfigMap)
|
||||
lastEvent, ok := waitForEvent(testWatchBroken, watch.Modified, nil, 1*time.Minute)
|
||||
if !ok {
|
||||
|
@ -220,25 +220,25 @@ var _ = SIGDescribe("Watchers", func() {
|
|||
}
|
||||
testWatchBroken.Stop()
|
||||
|
||||
By("modifying the configmap a second time, while the watch is closed")
|
||||
ginkgo.By("modifying the configmap a second time, while the watch is closed")
|
||||
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
|
||||
|
||||
By("creating a new watch on configmaps from the last resource version observed by the first watch")
|
||||
ginkgo.By("creating a new watch on configmaps from the last resource version observed by the first watch")
|
||||
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
|
||||
if !ok {
|
||||
framework.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent)
|
||||
}
|
||||
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
|
||||
|
||||
By("deleting the configmap")
|
||||
ginkgo.By("deleting the configmap")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
|
||||
|
||||
By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
|
||||
ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
|
||||
expectEvent(testWatchRestarted, watch.Modified, testConfigMapSecondUpdate)
|
||||
expectEvent(testWatchRestarted, watch.Deleted, nil)
|
||||
})
|
||||
|
@@ -263,57 +263,57 @@ var _ = SIGDescribe("Watchers", func() {
},
}

By("creating a watch on configmaps with a certain label")
ginkgo.By("creating a watch on configmaps with a certain label")
testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue)
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)

By("creating a new configmap")
ginkgo.By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)

By("modifying the configmap once")
ginkgo.By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)

By("changing the label value of the configmap")
ginkgo.By("changing the label value of the configmap")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value"
})
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)

By("Expecting to observe a delete notification for the watched object")
ginkgo.By("Expecting to observe a delete notification for the watched object")
expectEvent(testWatch, watch.Added, testConfigMap)
expectEvent(testWatch, watch.Modified, testConfigMapFirstUpdate)
expectEvent(testWatch, watch.Deleted, nil)

By("modifying the configmap a second time")
ginkgo.By("modifying the configmap a second time")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)

By("Expecting not to observe a notification because the object no longer meets the selector's requirements")
ginkgo.By("Expecting not to observe a notification because the object no longer meets the selector's requirements")
expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)

By("changing the label value of the configmap back")
ginkgo.By("changing the label value of the configmap back")
testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue
})
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)

By("modifying the configmap a third time")
ginkgo.By("modifying the configmap a third time")
testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "3")
})
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a third time", configMapName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a third time", configMapName, ns)

By("deleting the configmap")
ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)

By("Expecting to observe an add notification for the watched object when the label value was restored")
ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored")
expectEvent(testWatch, watch.Added, testConfigMapLabelRestored)
expectEvent(testWatch, watch.Modified, testConfigMapThirdUpdate)
expectEvent(testWatch, watch.Deleted, nil)
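expectEvent and expectNoEvent above are helpers local to this test file; the mechanism underneath both is a select on the watch channel with a deadline, roughly as sketched here (helper name and timeout value are illustrative):

package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/watch"
)

// nextEvent waits up to timeout for one event. Asserting that an event
// arrives (expectEvent) or that the deadline passes first (expectNoEvent)
// are the two uses the test makes of this shape.
func nextEvent(w watch.Interface, timeout time.Duration) (watch.Event, error) {
	select {
	case ev, ok := <-w.ResultChan():
		if !ok {
			return watch.Event{}, fmt.Errorf("watch channel closed")
		}
		return ev, nil
	case <-time.After(timeout):
		return watch.Event{}, fmt.Errorf("no event within %v", timeout)
	}
}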
@@ -332,21 +332,21 @@ var _ = SIGDescribe("Watchers", func() {

iterations := 100

By("starting a background goroutine to produce watch events")
ginkgo.By("starting a background goroutine to produce watch events")
donec := make(chan struct{})
stopc := make(chan struct{})
go func() {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
defer close(donec)
produceConfigMapEvents(f, stopc, 5*time.Millisecond)
}()

By("creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order")
ginkgo.By("creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order")
wcs := []watch.Interface{}
resourceVersion := "0"
for i := 0; i < iterations; i++ {
wc, err := c.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{ResourceVersion: resourceVersion})
Expect(err).NotTo(HaveOccurred(), "Failed to watch configmaps in the namespace %s", ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to watch configmaps in the namespace %s", ns)
wcs = append(wcs, wc)
resourceVersion = waitForNextConfigMapEvent(wcs[0]).ResourceVersion
for _, wc := range wcs[1:] {

@@ -435,9 +435,8 @@ func waitForNextConfigMapEvent(watch watch.Interface) *v1.ConfigMap {
case event := <-watch.ResultChan():
if configMap, ok := event.Object.(*v1.ConfigMap); ok {
return configMap
} else {
framework.Failf("expected config map")
}
framework.Failf("expected config map")
case <-time.After(10 * time.Second):
framework.Failf("timed out waiting for watch event")
}
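The change above is the stock golint fix for an if/else where the if branch returns: drop the else and outdent its body (the next hunk applies the sibling fix, i++ in place of i += 1). In miniature, assuming a toy asString helper:

package example

import "fmt"

// asString shows the preferred shape: once the if branch returns,
// the else block is redundant and its body moves to the outer level.
func asString(x interface{}) string {
	if s, ok := x.(string); ok {
		return s
	}
	return fmt.Sprintf("%v", x) // previously nested inside "else { ... }"
}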
@@ -473,18 +472,18 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa
case createEvent:
cm.Name = name(i)
_, err := c.CoreV1().ConfigMaps(ns).Create(cm)
Expect(err).NotTo(HaveOccurred(), "Failed to create configmap %s in namespace %s", cm.Name, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create configmap %s in namespace %s", cm.Name, ns)
existing = append(existing, i)
i += 1
i++
case updateEvent:
idx := rand.Intn(len(existing))
cm.Name = name(existing[idx])
_, err := c.CoreV1().ConfigMaps(ns).Update(cm)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %s in namespace %s", cm.Name, ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to update configmap %s in namespace %s", cm.Name, ns)
case deleteEvent:
idx := rand.Intn(len(existing))
err := c.CoreV1().ConfigMaps(ns).Delete(name(existing[idx]), &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
existing = append(existing[:idx], existing[idx+1:]...)
default:
framework.Failf("Unsupported event operation: %d", op)

@@ -42,8 +42,9 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
// ensure libs have a chance to initialize
_ "github.com/stretchr/testify/assert"
)
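This import hunk carries the rationale for the whole commit: dot-imports dump By, It, Expect and friends into the file's namespace, and golint rejects them because call sites no longer say where an identifier comes from. With qualified imports every call is explicit; a small sketch of the resulting style:

package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.It("names the package at every call site", func() {
		// With dot-imports this read: Expect(1 + 1).To(Equal(2))
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})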
@@ -93,7 +94,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
var client clientset.Interface
var namespaceName string

BeforeEach(func() {
ginkgo.BeforeEach(func() {
client = f.ClientSet
namespaceName = f.Namespace.Name

@@ -106,7 +107,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
framework.Skipf("dynamic configuration of webhooks requires the admissionregistration.k8s.io group to be enabled")
}

By("Setting up server cert")
ginkgo.By("Setting up server cert")
context = setupServerCert(namespaceName, serviceName)
createAuthReaderRoleBinding(f, namespaceName)

@@ -116,23 +117,23 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
deployWebhookAndService(f, imageutils.GetE2EImage(imageutils.AdmissionWebhook), context)
})

AfterEach(func() {
ginkgo.AfterEach(func() {
cleanWebhookTest(client, namespaceName)
})

It("Should be able to deny pod and configmap creation", func() {
ginkgo.It("Should be able to deny pod and configmap creation", func() {
webhookCleanup := registerWebhook(f, context)
defer webhookCleanup()
testWebhook(f)
})

It("Should be able to deny attaching pod", func() {
ginkgo.It("Should be able to deny attaching pod", func() {
webhookCleanup := registerWebhookForAttachingPod(f, context)
defer webhookCleanup()
testAttachingPodWebhook(f)
})

It("Should be able to deny custom resource creation", func() {
ginkgo.It("Should be able to deny custom resource creation", func() {
testcrd, err := crd.CreateTestCRD(f)
if err != nil {
return

@@ -143,25 +144,25 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
testCustomResourceWebhook(f, testcrd.Crd, testcrd.GetV1DynamicClient())
})

It("Should unconditionally reject operations on fail closed webhook", func() {
ginkgo.It("Should unconditionally reject operations on fail closed webhook", func() {
webhookCleanup := registerFailClosedWebhook(f, context)
defer webhookCleanup()
testFailClosedWebhook(f)
})

It("Should mutate configmap", func() {
ginkgo.It("Should mutate configmap", func() {
webhookCleanup := registerMutatingWebhookForConfigMap(f, context)
defer webhookCleanup()
testMutatingConfigMapWebhook(f)
})

It("Should mutate pod and apply defaults after mutation", func() {
ginkgo.It("Should mutate pod and apply defaults after mutation", func() {
webhookCleanup := registerMutatingWebhookForPod(f, context)
defer webhookCleanup()
testMutatingPodWebhook(f)
})

It("Should not be able to mutate or prevent deletion of webhook configuration objects", func() {
ginkgo.It("Should not be able to mutate or prevent deletion of webhook configuration objects", func() {
validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, context)
defer validatingWebhookCleanup()
mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, context)

@@ -169,7 +170,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
testWebhooksForWebhookConfigurations(f)
})

It("Should mutate custom resource", func() {
ginkgo.It("Should mutate custom resource", func() {
testcrd, err := crd.CreateTestCRD(f)
if err != nil {
return

@@ -180,14 +181,14 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.GetV1DynamicClient())
})

It("Should deny crd creation", func() {
ginkgo.It("Should deny crd creation", func() {
crdWebhookCleanup := registerValidatingWebhookForCRD(f, context)
defer crdWebhookCleanup()

testCRDDenyWebhook(f)
})

It("Should mutate custom resource with different stored version", func() {
ginkgo.It("Should mutate custom resource with different stored version", func() {
testcrd, err := crd.CreateMultiVersionTestCRDWithV1Storage(f)
if err != nil {
return

@@ -198,33 +199,33 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
testMultiVersionCustomResourceWebhook(f, testcrd)
})

It("Should deny crd creation", func() {
ginkgo.It("Should deny crd creation", func() {
crdWebhookCleanup := registerValidatingWebhookForCRD(f, context)
defer crdWebhookCleanup()

testCRDDenyWebhook(f)
})

It("Should honor timeout", func() {
ginkgo.It("Should honor timeout", func() {
policyFail := v1beta1.Fail
policyIgnore := v1beta1.Ignore

By("Setting timeout (1s) shorter than webhook latency (5s)")
ginkgo.By("Setting timeout (1s) shorter than webhook latency (5s)")
slowWebhookCleanup := registerSlowWebhook(f, context, &policyFail, pointer.Int32Ptr(1))
testSlowWebhookTimeoutFailEarly(f)
slowWebhookCleanup()

By("Having no error when timeout is shorter than webhook latency and failure policy is ignore")
ginkgo.By("Having no error when timeout is shorter than webhook latency and failure policy is ignore")
slowWebhookCleanup = registerSlowWebhook(f, context, &policyIgnore, pointer.Int32Ptr(1))
testSlowWebhookTimeoutNoError(f)
slowWebhookCleanup()

By("Having no error when timeout is longer than webhook latency")
ginkgo.By("Having no error when timeout is longer than webhook latency")
slowWebhookCleanup = registerSlowWebhook(f, context, &policyFail, pointer.Int32Ptr(10))
testSlowWebhookTimeoutNoError(f)
slowWebhookCleanup()

By("Having no error when timeout is empty (defaulted to 10s in v1beta1)")
ginkgo.By("Having no error when timeout is empty (defaulted to 10s in v1beta1)")
slowWebhookCleanup = registerSlowWebhook(f, context, &policyFail, nil)
testSlowWebhookTimeoutNoError(f)
slowWebhookCleanup()
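The timeout matrix above maps onto two v1beta1 webhook fields: TimeoutSeconds caps how long the API server waits for the webhook, and FailurePolicy decides whether a timed-out call rejects (Fail) or admits (Ignore) the request. A sketch of just those knobs, with an illustrative webhook name and the rest of the configuration omitted:

package example

import (
	"k8s.io/api/admissionregistration/v1beta1"
	"k8s.io/utils/pointer"
)

// slowWebhookFields mirrors what the test varies: a 1s cap that a
// 5s-latency webhook always exceeds, plus the policy applied on timeout.
func slowWebhookFields(policy v1beta1.FailurePolicyType) v1beta1.Webhook {
	return v1beta1.Webhook{
		Name:           "allow-after-delay.example.com", // illustrative
		FailurePolicy:  &policy,                         // Fail rejects on timeout, Ignore admits
		TimeoutSeconds: pointer.Int32Ptr(1),             // nil falls back to the 10s v1beta1 default
		// Rules and ClientConfig omitted; registerSlowWebhook below fills them in.
	}
}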
@@ -240,7 +241,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
})

func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
By("Create role binding to let webhook read extension-apiserver-authentication")
ginkgo.By("Create role binding to let webhook read extension-apiserver-authentication")
client := f.ClientSet
// Create the role binding to allow the webhook read the extension-apiserver-authentication configmap
_, err := client.RbacV1beta1().RoleBindings("kube-system").Create(&rbacv1beta1.RoleBinding{

@@ -272,7 +273,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
}

func deployWebhookAndService(f *framework.Framework, image string, context *certContext) {
By("Deploying the webhook pod")
ginkgo.By("Deploying the webhook pod")
client := f.ClientSet

// Creating the secret that contains the webhook's cert.

@@ -350,13 +351,13 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
}
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
By("Wait for the deployment to be ready")
ginkgo.By("Wait for the deployment to be ready")
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = framework.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentName, namespace)

By("Deploying the webhook service")
ginkgo.By("Deploying the webhook service")

serviceLabels := map[string]string{"webhook": "true"}
service := &v1.Service{

@@ -379,7 +380,7 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
_, err = client.CoreV1().Services(namespace).Create(service)
framework.ExpectNoError(err, "creating service %s in namespace %s", serviceName, namespace)

By("Verifying the service has paired with the endpoint")
ginkgo.By("Verifying the service has paired with the endpoint")
err = framework.WaitForServiceEndpointsNum(client, namespace, serviceName, 1, 1*time.Second, 30*time.Second)
framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceName, 1)
}

@@ -388,7 +389,7 @@ func strPtr(s string) *string { return &s }

func registerWebhook(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the webhook via the AdmissionRegistration API")
ginkgo.By("Registering the webhook via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := webhookConfigName
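The body of registerWebhook is elided by the hunk; for orientation, a ValidatingWebhookConfiguration of this API vintage has roughly the shape sketched below. Every concrete value here (object name, webhook name, path, rules) is illustrative, not the test's exact configuration:

package example

import (
	"k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// demoWebhookConfig sketches the object such a register function builds:
// a rule scoping the webhook to pod creation, and a client config pointing
// at the in-cluster service deployed above, trusted via the test CA bundle.
func demoWebhookConfig(namespace, service string, caBundle []byte) *v1beta1.ValidatingWebhookConfiguration {
	policy := v1beta1.Fail
	path := "/pods" // hypothetical path served by the webhook pod
	return &v1beta1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-webhook-config"},
		Webhooks: []v1beta1.Webhook{{
			Name: "deny-pod.example.com",
			Rules: []v1beta1.RuleWithOperations{{
				Operations: []v1beta1.OperationType{v1beta1.Create},
				Rule: v1beta1.Rule{
					APIGroups:   []string{""},
					APIVersions: []string{"v1"},
					Resources:   []string{"pods"},
				},
			}},
			ClientConfig: v1beta1.WebhookClientConfig{
				Service:  &v1beta1.ServiceReference{Namespace: namespace, Name: service, Path: &path},
				CABundle: caBundle, // the self-signed CA from setupServerCert
			},
			FailurePolicy: &policy,
		}},
	}
}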
@@ -469,7 +470,7 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {

func registerWebhookForAttachingPod(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the webhook via the AdmissionRegistration API")
ginkgo.By("Registering the webhook via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := attachingPodWebhookConfigName

@@ -517,7 +518,7 @@ func registerWebhookForAttachingPod(f *framework.Framework, context *certContext

func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the mutating configmap webhook via the AdmissionRegistration API")
ginkgo.By("Registering the mutating configmap webhook via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := mutatingWebhookConfigName

@@ -577,11 +578,11 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certCo
}

func testMutatingConfigMapWebhook(f *framework.Framework) {
By("create a configmap that should be updated by the webhook")
ginkgo.By("create a configmap that should be updated by the webhook")
client := f.ClientSet
configMap := toBeMutatedConfigMap(f)
mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
expectedConfigMapData := map[string]string{
"mutation-start": "yes",
"mutation-stage-1": "yes",

@@ -594,7 +595,7 @@ func testMutatingConfigMapWebhook(f *framework.Framework) {

func registerMutatingWebhookForPod(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the mutating pod webhook via the AdmissionRegistration API")
ginkgo.By("Registering the mutating pod webhook via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := podMutatingWebhookConfigName

@@ -635,11 +636,11 @@ func registerMutatingWebhookForPod(f *framework.Framework, context *certContext)
}

func testMutatingPodWebhook(f *framework.Framework) {
By("create a pod that should be updated by the webhook")
ginkgo.By("create a pod that should be updated by the webhook")
client := f.ClientSet
configMap := toBeMutatedPod(f)
mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(configMap)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
if len(mutatedPod.Spec.InitContainers) != 1 {
framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers)
}

@@ -668,12 +669,12 @@ func toBeMutatedPod(f *framework.Framework) *v1.Pod {
}

func testWebhook(f *framework.Framework) {
By("create a pod that should be denied by the webhook")
ginkgo.By("create a pod that should be denied by the webhook")
client := f.ClientSet
// Creating the pod, the request should be rejected
pod := nonCompliantPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred(), "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name)
expectedErrMsg1 := "the pod contains unwanted container name"
if !strings.Contains(err.Error(), expectedErrMsg1) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())

@@ -683,28 +684,28 @@ func testWebhook(f *framework.Framework) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg2, err.Error())
}

By("create a pod that causes the webhook to hang")
ginkgo.By("create a pod that causes the webhook to hang")
client = f.ClientSet
// Creating the pod, the request should be rejected
pod = hangingPod(f)
_, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred(), "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name)
expectedTimeoutErr := "request did not complete within"
if !strings.Contains(err.Error(), expectedTimeoutErr) {
framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
}

By("create a configmap that should be denied by the webhook")
ginkgo.By("create a configmap that should be denied by the webhook")
// Creating the configmap, the request should be rejected
configmap := nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
Expect(err).To(HaveOccurred(), "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name)
expectedErrMsg := "the configmap contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}

By("create a configmap that should be admitted by the webhook")
ginkgo.By("create a configmap that should be admitted by the webhook")
// Creating the configmap, the request should be admitted
configmap = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{

@@ -715,9 +716,9 @@ func testWebhook(f *framework.Framework) {
},
}
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)

By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
ginkgo.By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
toNonCompliantFn := func(cm *v1.ConfigMap) {
if cm.Data == nil {
cm.Data = map[string]string{}

@@ -725,20 +726,20 @@ func testWebhook(f *framework.Framework) {
cm.Data["webhook-e2e-test"] = "webhook-disallow"
}
_, err = updateConfigMap(client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn)
Expect(err).To(HaveOccurred(), "update (PUT) admitted configmap %s in namespace %s to a non-compliant one should be rejected by webhook", allowedConfigMapName, f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "update (PUT) admitted configmap %s in namespace %s to a non-compliant one should be rejected by webhook", allowedConfigMapName, f.Namespace.Name)
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}

By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
ginkgo.By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
patch := nonCompliantConfigMapPatch()
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
Expect(err).To(HaveOccurred(), "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch)
gomega.Expect(err).To(gomega.HaveOccurred(), "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch)
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}

By("create a namespace that bypass the webhook")
ginkgo.By("create a namespace that bypass the webhook")
err = createNamespace(f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{
Name: skippedNamespaceName,
Labels: map[string]string{

@@ -749,26 +750,26 @@ func testWebhook(f *framework.Framework) {
// clean up the namespace
defer client.CoreV1().Namespaces().Delete(skippedNamespaceName, nil)

By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
configmap = nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap)
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
}

func testAttachingPodWebhook(f *framework.Framework) {
By("create a pod")
ginkgo.By("create a pod")
client := f.ClientSet
pod := toBeAttachedPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
err = framework.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)

By("'kubectl attach' the pod, should be denied by the webhook")
ginkgo.By("'kubectl attach' the pod, should be denied by the webhook")
timer := time.NewTimer(30 * time.Second)
defer timer.Stop()
_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
Expect(err).To(HaveOccurred(), "'kubectl attach' the pod, should be denied by the webhook")
gomega.Expect(err).To(gomega.HaveOccurred(), "'kubectl attach' the pod, should be denied by the webhook")
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
}

@@ -802,7 +803,7 @@ func failingWebhook(namespace, name string) v1beta1.Webhook {

func registerFailClosedWebhook(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API")
ginkgo.By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := webhookFailClosedConfigName

@@ -841,7 +842,7 @@ func registerFailClosedWebhook(f *framework.Framework, context *certContext) fun

func testFailClosedWebhook(f *framework.Framework) {
client := f.ClientSet
By("create a namespace for the webhook")
ginkgo.By("create a namespace for the webhook")
err := createNamespace(f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{
Name: failNamespaceName,
Labels: map[string]string{

@@ -851,14 +852,14 @@ func testFailClosedWebhook(f *framework.Framework) {
framework.ExpectNoError(err, "creating namespace %q", failNamespaceName)
defer client.CoreV1().Namespaces().Delete(failNamespaceName, nil)

By("create a configmap should be unconditionally rejected by the webhook")
ginkgo.By("create a configmap should be unconditionally rejected by the webhook")
configmap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
}
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
Expect(err).To(HaveOccurred(), "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName)
gomega.Expect(err).To(gomega.HaveOccurred(), "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName)
if !errors.IsInternalError(err) {
framework.Failf("expect an internal error, got %#v", err)
}
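For the fail-closed test above: the registered webhook deliberately points at a service that nothing serves, so every admission call errors out, and FailurePolicy Fail converts that error into a rejection, which clients observe as an internal error. A sketch of that construction, mirroring the failingWebhook helper named in the next hunk (service name and path are assumptions):

package example

import "k8s.io/api/admissionregistration/v1beta1"

// failClosedSketch: the service reference resolves to nothing, so the
// API server can never reach the webhook; with Fail, that rejects requests.
func failClosedSketch(namespace string) v1beta1.Webhook {
	policy := v1beta1.Fail
	path := "/unreachable" // nothing serves this path (assumption for the sketch)
	return v1beta1.Webhook{
		Name:          "fail-closed.example.com",
		FailurePolicy: &policy,
		ClientConfig: v1beta1.WebhookClientConfig{
			Service: &v1beta1.ServiceReference{
				Namespace: namespace,
				Name:      "no-such-service", // hypothetical, intentionally absent
				Path:      &path,
			},
		},
		// Rules omitted; the real helper scopes this to a labelled namespace.
	}
}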
@@ -867,7 +868,7 @@ func testFailClosedWebhook(f *framework.Framework) {
func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
var err error
client := f.ClientSet
By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
ginkgo.By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := validatingWebhookForWebhooksConfigName

@@ -920,7 +921,7 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c
func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
var err error
client := f.ClientSet
By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
ginkgo.By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := mutatingWebhookForWebhooksConfigName

@@ -976,7 +977,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con
func testWebhooksForWebhookConfigurations(f *framework.Framework) {
var err error
client := f.ClientSet
By("Creating a dummy validating-webhook-configuration object")
ginkgo.By("Creating a dummy validating-webhook-configuration object")

namespace := f.Namespace.Name
failurePolicy := v1beta1.Ignore

@@ -1022,12 +1023,12 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework) {
// The webhook configuration is honored in 10s.
time.Sleep(10 * time.Second)

By("Deleting the validating-webhook-configuration, which should be possible to remove")
ginkgo.By("Deleting the validating-webhook-configuration, which should be possible to remove")

err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(dummyValidatingWebhookConfigName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", dummyValidatingWebhookConfigName, namespace)

By("Creating a dummy mutating-webhook-configuration object")
ginkgo.By("Creating a dummy mutating-webhook-configuration object")

mutatedMutatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{

@@ -1070,7 +1071,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework) {
// The webhook configuration is honored in 10s.
time.Sleep(10 * time.Second)

By("Deleting the mutating-webhook-configuration, which should be possible to remove")
ginkgo.By("Deleting the mutating-webhook-configuration, which should be possible to remove")

err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(dummyMutatingWebhookConfigName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", dummyMutatingWebhookConfigName, namespace)

@@ -1200,7 +1201,7 @@ func cleanWebhookTest(client clientset.Interface, namespaceName string) {

func registerWebhookForCustomResource(f *framework.Framework, context *certContext, testcrd *crd.TestCrd) func() {
client := f.ClientSet
By("Registering the custom resource webhook via the AdmissionRegistration API")
ginkgo.By("Registering the custom resource webhook via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := crWebhookConfigName

@@ -1242,7 +1243,7 @@ func registerWebhookForCustomResource(f *framework.Framework, context *certConte

func registerMutatingWebhookForCustomResource(f *framework.Framework, context *certContext, testcrd *crd.TestCrd) func() {
client := f.ClientSet
By("Registering the mutating webhook for a custom resource via the AdmissionRegistration API")
ginkgo.By("Registering the mutating webhook for a custom resource via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := crMutatingWebhookConfigName

@@ -1302,7 +1303,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c
}

func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
By("Creating a custom resource that should be denied by the webhook")
ginkgo.By("Creating a custom resource that should be denied by the webhook")
crInstanceName := "cr-instance-1"
crInstance := &unstructured.Unstructured{
Object: map[string]interface{}{

@@ -1318,7 +1319,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
},
}
_, err := customResourceClient.Create(crInstance, metav1.CreateOptions{})
Expect(err).To(HaveOccurred(), "create custom resource %s in namespace %s should be denied by webhook", crInstanceName, f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "create custom resource %s in namespace %s should be denied by webhook", crInstanceName, f.Namespace.Name)
expectedErrMsg := "the custom resource contains unwanted data"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())

@@ -1326,7 +1327,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
}

func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
By("Creating a custom resource that should be mutated by the webhook")
ginkgo.By("Creating a custom resource that should be mutated by the webhook")
crName := "cr-instance-1"
cr := &unstructured.Unstructured{
Object: map[string]interface{}{

@@ -1342,7 +1343,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
},
}
mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
expectedCRData := map[string]interface{}{
"mutation-start": "yes",
"mutation-stage-1": "yes",

@@ -1355,7 +1356,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension

func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.TestCrd) {
customResourceClient := testcrd.GetV1DynamicClient()
By("Creating a custom resource while v1 is storage version")
ginkgo.By("Creating a custom resource while v1 is storage version")
crName := "cr-instance-1"
cr := &unstructured.Unstructured{
Object: map[string]interface{}{

@@ -1371,22 +1372,22 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.
},
}
_, err := customResourceClient.Create(cr, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)

By("Patching Custom Resource Definition to set v2 as storage")
ginkgo.By("Patching Custom Resource Definition to set v2 as storage")
apiVersionWithV2StoragePatch := fmt.Sprint(`{"spec": {"versions": [{"name": "v1", "storage": false, "served": true},{"name": "v2", "storage": true, "served": true}]}}`)
_, err = testcrd.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch))
Expect(err).NotTo(HaveOccurred(), "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name)

By("Patching the custom resource while v2 is storage version")
ginkgo.By("Patching the custom resource while v2 is storage version")
crDummyPatch := fmt.Sprint(`[{ "op": "add", "path": "/dummy", "value": "test" }]`)
_, err = testcrd.DynamicClients["v2"].Patch(crName, types.JSONPatchType, []byte(crDummyPatch), metav1.PatchOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to patch custom resource %s in namespace: %s", crName, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to patch custom resource %s in namespace: %s", crName, f.Namespace.Name)
}

func registerValidatingWebhookForCRD(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the crd webhook via the AdmissionRegistration API")
ginkgo.By("Registering the crd webhook via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := crdWebhookConfigName

@@ -1432,7 +1433,7 @@ func registerValidatingWebhookForCRD(f *framework.Framework, context *certContex
}

func testCRDDenyWebhook(f *framework.Framework) {
By("Creating a custom resource definition that should be denied by the webhook")
ginkgo.By("Creating a custom resource definition that should be denied by the webhook")
name := fmt.Sprintf("e2e-test-%s-%s-crd", f.BaseName, "deny")
kind := fmt.Sprintf("E2e-test-%s-%s-crd", f.BaseName, "deny")
group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)

@@ -1483,7 +1484,7 @@ func testCRDDenyWebhook(f *framework.Framework) {

// create CRD
_, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)
Expect(err).To(HaveOccurred(), "create custom resource definition %s should be denied by webhook", testcrd.GetMetaName())
gomega.Expect(err).To(gomega.HaveOccurred(), "create custom resource definition %s should be denied by webhook", testcrd.GetMetaName())
expectedErrMsg := "the crd contains unwanted label"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())

@@ -1492,7 +1493,7 @@ func testCRDDenyWebhook(f *framework.Framework) {

func registerSlowWebhook(f *framework.Framework, context *certContext, policy *v1beta1.FailurePolicyType, timeout *int32) func() {
client := f.ClientSet
By("Registering slow webhook via the AdmissionRegistration API")
ginkgo.By("Registering slow webhook via the AdmissionRegistration API")

namespace := f.Namespace.Name
configName := slowWebhookConfigName

@@ -1551,11 +1552,11 @@ func registerSlowWebhook(f *framework.Framework, context *certContext, policy *v
}

func testSlowWebhookTimeoutFailEarly(f *framework.Framework) {
By("Request fails when timeout (1s) is shorter than slow webhook latency (5s)")
ginkgo.By("Request fails when timeout (1s) is shorter than slow webhook latency (5s)")
client := f.ClientSet
name := "e2e-test-slow-webhook-configmap"
_, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}})
Expect(err).To(HaveOccurred(), "create configmap in namespace %s should have timed-out reaching slow webhook", f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "create configmap in namespace %s should have timed-out reaching slow webhook", f.Namespace.Name)
expectedErrMsg := `/always-allow-delay-5s?timeout=1s: context deadline exceeded`
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())

@@ -1566,7 +1567,7 @@ func testSlowWebhookTimeoutNoError(f *framework.Framework) {
client := f.ClientSet
name := "e2e-test-slow-webhook-configmap"
_, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}})
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(name, &metav1.DeleteOptions{})
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
}