k3s/test/integration/replicationcontroller/replicationcontroller_test.go

446 lines
16 KiB
Go
Raw Normal View History

2016-06-10 23:28:42 +00:00
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package replicationcontroller
2016-06-10 23:28:42 +00:00
import (
"fmt"
"net/http/httptest"
"reflect"
"testing"
"time"
2017-06-22 18:24:23 +00:00
"k8s.io/api/core/v1"
2017-01-11 14:09:48 +00:00
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2017-01-16 20:13:59 +00:00
"k8s.io/apimachinery/pkg/types"
2017-01-11 14:09:48 +00:00
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
2017-01-19 18:27:59 +00:00
restclient "k8s.io/client-go/rest"
2017-01-24 14:11:51 +00:00
"k8s.io/client-go/tools/cache"
2016-06-10 23:28:42 +00:00
"k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/test/integration/framework"
)
2017-10-11 09:44:30 +00:00
const (
	// pollInterval and pollTimeout bound every wait.Poll call in this file:
	// conditions are re-checked every 100ms for up to 60s before failing.
	pollInterval = 100 * time.Millisecond
	pollTimeout = 60 * time.Second
)
2016-06-10 23:28:42 +00:00
// testLabels returns the label set shared by every RC selector and every
// pod these tests create, so pods and controllers match each other.
func testLabels() map[string]string {
	labels := map[string]string{"name": "test"}
	return labels
}
func newRC(name, namespace string, replicas int) *v1.ReplicationController {
replicasCopy := int32(replicas)
return &v1.ReplicationController{
2016-12-03 18:57:26 +00:00
TypeMeta: metav1.TypeMeta{
2016-06-10 23:28:42 +00:00
Kind: "ReplicationController",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
2016-06-10 23:28:42 +00:00
Namespace: namespace,
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Selector: testLabels(),
Replicas: &replicasCopy,
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
2016-06-10 23:28:42 +00:00
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
},
},
}
}
func newMatchingPod(podName, namespace string) *v1.Pod {
return &v1.Pod{
2016-12-03 18:57:26 +00:00
TypeMeta: metav1.TypeMeta{
2016-06-10 23:28:42 +00:00
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
2016-06-10 23:28:42 +00:00
Name: podName,
Namespace: namespace,
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
},
},
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
}
}
// verifyRemainingObjects verifies if the number of the remaining replication
// controllers and pods are rcNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
podClient := clientSet.CoreV1().Pods(namespace)
pods, err := podClient.List(metav1.ListOptions{})
2016-06-10 23:28:42 +00:00
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
}
rcs, err := rcClient.List(metav1.ListOptions{})
2016-06-10 23:28:42 +00:00
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != rcNum {
ret = false
t.Logf("expect %d RCs, got %d RCs", rcNum, len(rcs.Items))
}
return ret, nil
}
func rmSetup(t *testing.T, stopCh chan struct{}) (*httptest.Server, framework.CloseFunc, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
2016-06-10 23:28:42 +00:00
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)
2016-06-10 23:28:42 +00:00
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
2017-02-08 21:18:21 +00:00
informers := informers.NewSharedInformerFactory(clientSet, resyncPeriod)
rm := replication.NewReplicationManager(informers.Core().V1().Pods(), informers.Core().V1().ReplicationControllers(), clientSet, replication.BurstReplicas)
2017-01-02 16:35:12 +00:00
informers.Start(stopCh)
return s, closeFn, rm, informers, clientSet
2016-06-10 23:28:42 +00:00
}
2016-07-29 17:32:56 +00:00
// wait for the podInformer to observe the pods. Call this function before
// running the RC manager to prevent the rc manager from creating new pods
// rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
2017-10-11 09:44:30 +00:00
if err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
2016-07-29 17:32:56 +00:00
objects := podInformer.GetIndexer().List()
return len(objects) == podNum, nil
2016-07-29 17:32:56 +00:00
}); err != nil {
t.Fatal(err)
}
}
2016-06-10 23:28:42 +00:00
func TestAdoption(t *testing.T) {
2017-02-23 19:16:25 +00:00
boolPtr := func(b bool) *bool { return &b }
2016-06-10 23:28:42 +00:00
testCases := []struct {
name string
2016-12-09 18:16:33 +00:00
existingOwnerReferences func(rc *v1.ReplicationController) []metav1.OwnerReference
expectedOwnerReferences func(rc *v1.ReplicationController) []metav1.OwnerReference
2016-06-10 23:28:42 +00:00
}{
{
"pod refers rc as an owner, not a controller",
2016-12-09 18:16:33 +00:00
func(rc *v1.ReplicationController) []metav1.OwnerReference {
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"}}
2016-06-10 23:28:42 +00:00
},
2016-12-09 18:16:33 +00:00
func(rc *v1.ReplicationController) []metav1.OwnerReference {
2017-02-23 19:16:25 +00:00
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
2016-06-10 23:28:42 +00:00
},
},
{
"pod doesn't have owner references",
2016-12-09 18:16:33 +00:00
func(rc *v1.ReplicationController) []metav1.OwnerReference {
return []metav1.OwnerReference{}
2016-06-10 23:28:42 +00:00
},
2016-12-09 18:16:33 +00:00
func(rc *v1.ReplicationController) []metav1.OwnerReference {
2017-02-23 19:16:25 +00:00
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
2016-06-10 23:28:42 +00:00
},
},
{
"pod refers rc as a controller",
2016-12-09 18:16:33 +00:00
func(rc *v1.ReplicationController) []metav1.OwnerReference {
2017-02-23 19:16:25 +00:00
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)}}
2016-06-10 23:28:42 +00:00
},
2016-12-09 18:16:33 +00:00
func(rc *v1.ReplicationController) []metav1.OwnerReference {
2017-02-23 19:16:25 +00:00
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)}}
2016-06-10 23:28:42 +00:00
},
},
{
"pod refers other rc as the controller, refers the rc as an owner",
2016-12-09 18:16:33 +00:00
func(rc *v1.ReplicationController) []metav1.OwnerReference {
return []metav1.OwnerReference{
2017-02-23 19:16:25 +00:00
{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)},
2016-06-10 23:28:42 +00:00
{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
}
},
2016-12-09 18:16:33 +00:00
func(rc *v1.ReplicationController) []metav1.OwnerReference {
return []metav1.OwnerReference{
2017-02-23 19:16:25 +00:00
{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)},
2016-06-10 23:28:42 +00:00
{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
}
},
},
}
for i, tc := range testCases {
2017-01-02 16:35:12 +00:00
stopCh := make(chan struct{})
s, closeFn, rm, informers, clientSet := rmSetup(t, stopCh)
defer closeFn()
2016-06-10 23:28:42 +00:00
ns := framework.CreateTestingNamespace(fmt.Sprintf("adoption-%d", i), s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
podClient := clientSet.CoreV1().Pods(ns.Name)
2016-06-10 23:28:42 +00:00
const rcName = "rc"
rc, err := rcClient.Create(newRC(rcName, ns.Name, 1))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
podName := fmt.Sprintf("pod%d", i)
pod := newMatchingPod(podName, ns.Name)
2016-06-10 23:28:42 +00:00
pod.OwnerReferences = tc.existingOwnerReferences(rc)
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
informers.Start(stopCh)
waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1)
2016-06-10 23:28:42 +00:00
go rm.Run(5, stopCh)
2017-10-11 09:44:30 +00:00
if err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
2016-12-07 14:40:26 +00:00
updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
2016-06-10 23:28:42 +00:00
if err != nil {
return false, err
}
if e, a := tc.expectedOwnerReferences(rc), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
return true, nil
} else {
t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
return false, nil
}
}); err != nil {
2017-02-23 19:16:25 +00:00
t.Fatalf("test %q failed: %v", tc.name, err)
2016-06-10 23:28:42 +00:00
}
close(stopCh)
}
}
func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.ReplicationController, pods []*v1.Pod, ns string) {
rcClient := clientSet.CoreV1().ReplicationControllers(ns)
podClient := clientSet.CoreV1().Pods(ns)
2016-06-10 23:28:42 +00:00
for _, rc := range rcs {
if _, err := rcClient.Create(rc); err != nil {
t.Fatalf("Failed to create replication controller %s: %v", rc.Name, err)
}
}
for _, pod := range pods {
if _, err := podClient.Create(pod); err != nil {
t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
}
}
}
func waitRCStable(t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController, ns string) {
rcClient := clientSet.CoreV1().ReplicationControllers(ns)
2017-10-11 09:44:30 +00:00
if err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
2016-12-07 14:40:26 +00:00
updatedRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
2016-06-10 23:28:42 +00:00
if err != nil {
return false, err
}
return updatedRC.Status.Replicas == *rc.Spec.Replicas, nil
2016-06-10 23:28:42 +00:00
}); err != nil {
t.Fatal(err)
}
}
func TestUpdateSelectorToAdopt(t *testing.T) {
// We have pod1, pod2 and rc. rc.spec.replicas=1. At first rc.Selector
// matches pod1 only; change the selector to match pod2 as well. Verify
// there is only one pod left.
2017-01-02 16:35:12 +00:00
stopCh := make(chan struct{})
s, closeFn, rm, _, clientSet := rmSetup(t, stopCh)
defer closeFn()
2016-06-10 23:28:42 +00:00
ns := framework.CreateTestingNamespace("update-selector-to-adopt", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rc := newRC("rc", ns.Name, 1)
// let rc's selector only match pod1
rc.Spec.Selector["uniqueKey"] = "1"
rc.Spec.Template.Labels["uniqueKey"] = "1"
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
go rm.Run(5, stopCh)
waitRCStable(t, clientSet, rc, ns.Name)
// change the rc's selector to match both pods
patch := `{"spec":{"selector":{"uniqueKey":null}}}`
rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
2017-01-16 20:13:59 +00:00
rc, err := rcClient.Patch(rc.Name, types.StrategicMergePatchType, []byte(patch))
2016-06-10 23:28:42 +00:00
if err != nil {
t.Fatalf("Failed to patch replication controller: %v", err)
}
t.Logf("patched rc = %#v", rc)
// wait for the rc select both pods and delete one of them
2017-10-11 09:44:30 +00:00
if err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
2016-06-10 23:28:42 +00:00
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
}); err != nil {
t.Fatal(err)
}
close(stopCh)
}
func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
	// We have pod1, pod2 and rc. rc.spec.replicas=2. At first rc.Selector
	// matches pod1 and pod2; change the selector to match only pod1. Verify
	// that rc creates one more pod, so there are 3 pods. Also verify that
	// pod2's controllerRef is cleared.
	stopCh := make(chan struct{})
	s, closeFn, rm, informers, clientSet := rmSetup(t, stopCh)
	defer closeFn()
	ns := framework.CreateTestingNamespace("update-selector-to-remove-controllerref", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	rc := newRC("rc", ns.Name, 2)
	pod1 := newMatchingPod("pod1", ns.Name)
	pod1.Labels["uniqueKey"] = "1"
	pod2 := newMatchingPod("pod2", ns.Name)
	pod2.Labels["uniqueKey"] = "2"
	createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
	// Make sure the informer has seen both pods before the manager starts,
	// so it adopts them instead of creating replacements.
	waitToObservePods(t, informers.Core().V1().Pods().Informer(), 2)
	go rm.Run(5, stopCh)
	waitRCStable(t, clientSet, rc, ns.Name)
	// Narrow the rc's selector (and template labels) to uniqueKey=1 so it
	// matches only pod1; the rc must release pod2 and create a replacement.
	// (The original comment claiming this matches "both pods" was wrong.)
	patch := `{"spec":{"selector":{"uniqueKey":"1"},"template":{"metadata":{"labels":{"uniqueKey":"1"}}}}}`
	rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
	rc, err := rcClient.Patch(rc.Name, types.StrategicMergePatchType, []byte(patch))
	if err != nil {
		t.Fatalf("Failed to patch replication controller: %v", err)
	}
	t.Logf("patched rc = %#v", rc)
	// wait for the rc to create one more pod
	if err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
		return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
	}); err != nil {
		t.Fatal(err)
	}
	podClient := clientSet.CoreV1().Pods(ns.Name)
	// pod2 should have been orphaned: its owner references must be gone.
	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get pod2: %v", err)
	}
	if len(pod2.OwnerReferences) != 0 {
		t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
	}
	close(stopCh)
}
func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
	// We have pod1, pod2 and rc. rc.spec.replicas=2. At first rc.Selector
	// matches pod1 and pod2; change pod2's labels to non-matching. Verify
	// that rc creates one more pod, so there are 3 pods. Also verify that
	// pod2's controllerRef is cleared.
	stopCh := make(chan struct{})
	s, closeFn, rm, _, clientSet := rmSetup(t, stopCh)
	defer closeFn()
	ns := framework.CreateTestingNamespace("update-label-to-remove-controllerref", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	rc := newRC("rc", ns.Name, 2)
	pod1 := newMatchingPod("pod1", ns.Name)
	pod2 := newMatchingPod("pod2", ns.Name)
	createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
	go rm.Run(5, stopCh)
	waitRCStable(t, clientSet, rc, ns.Name)
	// Remove pod2's "name" label so it no longer matches the rc's selector;
	// the rc must release pod2 and create a replacement. (The original
	// comment claiming this changes the rc's selector was wrong — the patch
	// is applied to the pod.)
	patch := `{"metadata":{"labels":{"name":null}}}`
	podClient := clientSet.CoreV1().Pods(ns.Name)
	pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
	if err != nil {
		t.Fatalf("Failed to patch pod2: %v", err)
	}
	t.Logf("patched pod2 = %#v", pod2)
	// wait for the rc to create one more pod
	if err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
		return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
	}); err != nil {
		t.Fatal(err)
	}
	// pod2 should have been orphaned: its owner references must be gone.
	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get pod2: %v", err)
	}
	if len(pod2.OwnerReferences) != 0 {
		t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
	}
	close(stopCh)
}
func TestUpdateLabelToBeAdopted(t *testing.T) {
	// We have pod1, pod2 and rc. rc.spec.replicas=1. At first rc.Selector
	// matches pod1 only; change pod2's labels to be matching. Verify the RC
	// controller adopts pod2 and delete one of them, so there is only 1 pod
	// left.
	stopCh := make(chan struct{})
	s, closeFn, rm, _, clientSet := rmSetup(t, stopCh)
	defer closeFn()
	ns := framework.CreateTestingNamespace("update-label-to-be-adopted", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	rc := newRC("rc", ns.Name, 1)
	// Let rc's selector match only pod1.
	rc.Spec.Selector["uniqueKey"] = "1"
	rc.Spec.Template.Labels["uniqueKey"] = "1"
	pod1 := newMatchingPod("pod1", ns.Name)
	pod1.Labels["uniqueKey"] = "1"
	pod2 := newMatchingPod("pod2", ns.Name)
	pod2.Labels["uniqueKey"] = "2"
	createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
	go rm.Run(5, stopCh)
	waitRCStable(t, clientSet, rc, ns.Name)
	// Relabel pod2 with uniqueKey=1 so it matches the rc's selector and can
	// be adopted. (The original comment claiming this changes the rc's
	// selector was wrong — the patch is applied to the pod.)
	patch := `{"metadata":{"labels":{"uniqueKey":"1"}}}`
	podClient := clientSet.CoreV1().Pods(ns.Name)
	pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
	if err != nil {
		t.Fatalf("Failed to patch pod2: %v", err)
	}
	t.Logf("patched pod2 = %#v", pod2)
	// wait for the rc to select both pods and delete one of them
	if err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
		return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
	}); err != nil {
		t.Fatal(err)
	}
	close(stopCh)
}