mirror of https://github.com/k3s-io/k3s
Merge pull request #52067 from crimsonfaith91/revamp
Automatic merge from submit-queue (batch tested with PRs 53444, 52067, 53571, 53182). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

revamp replicaset integration tests

**What this PR does / why we need it**: This PR revamps the existing ReplicaSet integration tests. Some unit tests have been converted to integration tests.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #51484

**Release note**:

```release-note
NONE
```

**TODO List**:

- [x] add an integration test to verify the scale endpoint works (see the sketch after this list)
- [ ] convert testReplicaSetConditionCheck() to an integration test, and modify the test since the replicaset condition has been removed
- [ ] ~~HPA-related replicaset integration test (may be better suited under HPA integration tests)~~
- [x] verify that all tests from the "Suggested unit tests to retain" list of the internal doc will not be converted to integration tests, or convert the tests accordingly
- [ ] ~~refactor sync call tree (refer to the deployment and daemonset PRs)~~
- [x] further improve the written integration tests (revise test strategies, remove redundant GET / UPDATE calls, add more relevant sub-tests)
- [x] remove unit tests that have overlapping testing goals with the written integration tests
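For context on the scale-endpoint item above, the flow exercised by the new test (`testScalingUsingScaleSubresource` in the diff below) boils down to reading and updating the `extensions/v1beta1` scale subresource. A minimal sketch, not the test itself; the package name and function name are illustrative, and the conflict retry used by the real helper is omitted:

```go
package replicaset

import (
	"testing"

	clientset "k8s.io/client-go/kubernetes"
)

// scaleViaSubresource reads the ReplicaSet's scale subresource and writes the
// desired replica count back through the same endpoint.
func scaleViaSubresource(t *testing.T, c clientset.Interface, ns, name string, replicas int32) {
	scaleClient := c.ExtensionsV1beta1().Scales(ns)

	// Read the current scale for the ReplicaSet.
	scale, err := scaleClient.Get("ReplicaSet", name)
	if err != nil {
		t.Fatalf("Failed to get scale subresource for rs %s: %v", name, err)
	}

	// Update the replica count via the scale endpoint.
	scale.Spec.Replicas = replicas
	if _, err := scaleClient.Update("ReplicaSet", scale); err != nil {
		t.Fatalf("Failed to update scale subresource for rs %s: %v", name, err)
	}
}
```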
commit c381022273
@@ -14,17 +14,23 @@ go_test(
],
tags = ["integration"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)
@@ -26,19 +26,30 @@ import (

"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
typedv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/test/integration/framework"
)

func testLabels() map[string]string {
return map[string]string{"name": "test"}
const (
interval = 100 * time.Millisecond
timeout = 60 * time.Second
)

func labelMap() map[string]string {
return map[string]string{"foo": "bar"}
}

func newRS(name, namespace string, replicas int) *v1beta1.ReplicaSet {
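The new `interval` and `timeout` constants feed the `wait.PollImmediate` checks used throughout the rewritten tests. As a standalone illustration of that polling pattern (the `pollUntil` helper and its condition are hypothetical, not part of the PR):

```go
package replicaset

import (
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

const (
	interval = 100 * time.Millisecond
	timeout  = 60 * time.Second
)

// pollUntil wraps wait.PollImmediate with the interval/timeout constants above.
// checkSomething stands in for a per-test condition such as comparing status fields.
func pollUntil(t *testing.T, checkSomething func() (bool, error)) {
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		return checkSomething()
	}); err != nil {
		t.Fatalf("condition was not met within %v: %v", timeout, err)
	}
}
```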
@@ -54,12 +65,12 @@ func newRS(name, namespace string, replicas int) *v1beta1.ReplicaSet {
},
Spec: v1beta1.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: testLabels(),
MatchLabels: labelMap(),
},
Replicas: &replicasCopy,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: testLabels(),
Labels: labelMap(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
@@ -83,7 +94,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: testLabels(),
Labels: labelMap(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
@@ -162,19 +173,232 @@ func rmSimpleSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, clients
return s, closeFn, clientSet
}

// Run RS controller and informers
func runControllerAndInformers(t *testing.T, rm *replicaset.ReplicaSetController, informers informers.SharedInformerFactory, podNum int) chan struct{} {
stopCh := make(chan struct{})
informers.Start(stopCh)
waitToObservePods(t, informers.Core().V1().Pods().Informer(), podNum)
go rm.Run(5, stopCh)
return stopCh
}

// wait for the podInformer to observe the pods. Call this function before
// running the RS controller to prevent the rc manager from creating new pods
// rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
objects := podInformer.GetIndexer().List()
if len(objects) == podNum {
return true, nil
} else {
return false, nil
}
return len(objects) == podNum, nil
}); err != nil {
t.Fatal(err)
t.Fatalf("Error encountered when waiting for podInformer to observe the pods: %v", err)
}
}

func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.ReplicaSet, pods []*v1.Pod) ([]*v1beta1.ReplicaSet, []*v1.Pod) {
var createdRSs []*v1beta1.ReplicaSet
var createdPods []*v1.Pod
for _, rs := range rss {
createdRS, err := clientSet.Extensions().ReplicaSets(rs.Namespace).Create(rs)
if err != nil {
t.Fatalf("Failed to create replica set %s: %v", rs.Name, err)
}
createdRSs = append(createdRSs, createdRS)
}
for _, pod := range pods {
createdPod, err := clientSet.Core().Pods(pod.Namespace).Create(pod)
if err != nil {
t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
}
createdPods = append(createdPods, createdPod)
}

return createdRSs, createdPods
}

// Verify .Status.Replicas is equal to .Spec.Replicas
func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet) {
rsClient := clientSet.Extensions().ReplicaSets(rs.Namespace)
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return newRS.Status.Replicas == *rs.Spec.Replicas, nil
}); err != nil {
t.Fatalf("Failed to verify .Status.Replicas is equal to .Spec.Replicas for rs %s: %v", rs.Name, err)
}
}

// Update .Spec.Replicas to replicas and verify .Status.Replicas is changed accordingly
func scaleRS(t *testing.T, c clientset.Interface, rs *v1beta1.ReplicaSet, replicas int32) {
rsClient := c.Extensions().ReplicaSets(rs.Namespace)
rs = updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
*rs.Spec.Replicas = replicas
})
waitRSStable(t, c, rs)
}

func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, updateFunc func(*v1.Pod)) *v1.Pod {
var pod *v1.Pod
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
newPod, err := podClient.Get(podName, metav1.GetOptions{})
if err != nil {
return err
}
updateFunc(newPod)
pod, err = podClient.Update(newPod)
return err
}); err != nil {
t.Fatalf("Failed to update pod %s: %v", podName, err)
}
return pod
}
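As a usage illustration of the get-mutate-update helper above, a test can pin a pod with a finalizer before deleting it, which is what `TestDeletingAndFailedPods` later in this diff does. A self-contained sketch following the same `retry.RetryOnConflict` pattern; the function name and finalizer value are illustrative:

```go
package replicaset

import (
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/util/retry"
)

// addPodFinalizer attaches a finalizer so a subsequent Delete leaves the pod in a
// "deleting" state (deletionTimestamp set, object still present), retrying on conflicts.
func addPodFinalizer(t *testing.T, podClient typedv1.PodInterface, podName string) *v1.Pod {
	var pod *v1.Pod
	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		newPod, err := podClient.Get(podName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		newPod.Finalizers = append(newPod.Finalizers, "fake.example.com/blockDeletion")
		pod, err = podClient.Update(newPod)
		return err
	}); err != nil {
		t.Fatalf("Failed to add finalizer to pod %s: %v", podName, err)
	}
	return pod
}
```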
func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, pod *v1.Pod, updateStatusFunc func(*v1.Pod)) {
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
return err
}
updateStatusFunc(newPod)
_, err = podClient.UpdateStatus(newPod)
return err
}); err != nil {
t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err)
}
}

func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]string) *v1.PodList {
podSelector := labels.Set(labelMap).AsSelector()
options := metav1.ListOptions{LabelSelector: podSelector.String()}
pods, err := podClient.List(options)
if err != nil {
t.Fatalf("Failed obtaining a list of pods that match the pod labels %v: %v", labelMap, err)
}
return pods
}

func updateRS(t *testing.T, rsClient typedv1beta1.ReplicaSetInterface, rsName string, updateFunc func(*v1beta1.ReplicaSet)) *v1beta1.ReplicaSet {
var rs *v1beta1.ReplicaSet
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
newRS, err := rsClient.Get(rsName, metav1.GetOptions{})
if err != nil {
return err
}
updateFunc(newRS)
rs, err = rsClient.Update(newRS)
return err
}); err != nil {
t.Fatalf("Failed to update rs %s: %v", rsName, err)
}
return rs
}

// Verify ControllerRef of a RS pod that has incorrect attributes is automatically patched by the RS
func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, ownerReference *metav1.OwnerReference, rs *v1beta1.ReplicaSet, expectedOwnerReferenceNum int) {
ns := rs.Namespace
podClient := c.Core().Pods(ns)
updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
pod.OwnerReferences = []metav1.OwnerReference{*ownerReference}
})

if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return metav1.GetControllerOf(newPod) != nil, nil
}); err != nil {
t.Fatalf("Failed to verify ControllerRef for the pod %s is not nil: %v", pod.Name, err)
}

newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain pod %s: %v", pod.Name, err)
}
controllerRef := metav1.GetControllerOf(newPod)
if controllerRef.UID != rs.UID {
t.Fatalf("RS owner of the pod %s has a different UID: Expected %v, got %v", newPod.Name, rs.UID, controllerRef.UID)
}
ownerReferenceNum := len(newPod.GetOwnerReferences())
if ownerReferenceNum != expectedOwnerReferenceNum {
t.Fatalf("Unexpected number of owner references for pod %s: Expected %d, got %d", newPod.Name, expectedOwnerReferenceNum, ownerReferenceNum)
}
}

func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1.PodList, conditionStatus v1.ConditionStatus, lastTransitionTime time.Time) {
replicas := int32(len(pods.Items))
var readyPods int32
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
readyPods = 0
for i := range pods.Items {
pod := &pods.Items[i]
if podutil.IsPodReady(pod) {
readyPods++
continue
}
pod.Status.Phase = v1.PodRunning
_, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
if condition != nil {
condition.Status = conditionStatus
condition.LastTransitionTime = metav1.Time{Time: lastTransitionTime}
} else {
condition = &v1.PodCondition{
Type: v1.PodReady,
Status: conditionStatus,
LastTransitionTime: metav1.Time{Time: lastTransitionTime},
}
pod.Status.Conditions = append(pod.Status.Conditions, *condition)
}
_, err := clientSet.Core().Pods(pod.Namespace).UpdateStatus(pod)
if err != nil {
// When status fails to be updated, we continue to next pod
continue
}
readyPods++
}
return readyPods >= replicas, nil
})
if err != nil {
t.Fatalf("failed to mark all ReplicaSet pods to ready: %v", err)
}
}

func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *v1beta1.ReplicaSet, replicas int32) {
ns := rs.Namespace
rsClient := c.Extensions().ReplicaSets(ns)
newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain rs %s: %v", rs.Name, err)
}
kind := "ReplicaSet"
scaleClient := c.ExtensionsV1beta1().Scales(ns)
scale, err := scaleClient.Get(kind, rs.Name)
if err != nil {
t.Fatalf("Failed to obtain scale subresource for rs %s: %v", rs.Name, err)
}
if scale.Spec.Replicas != *newRS.Spec.Replicas {
t.Fatalf("Scale subresource for rs %s does not match .Spec.Replicas: expected %d, got %d", rs.Name, *newRS.Spec.Replicas, scale.Spec.Replicas)
}

if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
scale, err := scaleClient.Get(kind, rs.Name)
if err != nil {
return err
}
scale.Spec.Replicas = replicas
_, err = scaleClient.Update(kind, scale)
return err
}); err != nil {
t.Fatalf("Failed to set .Spec.Replicas of scale subresource for rs %s: %v", rs.Name, err)
}

newRS, err = rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain rs %s: %v", rs.Name, err)
}
if *newRS.Spec.Replicas != replicas {
t.Fatalf(".Spec.Replicas of rs %s does not match its scale subresource: expected %d, got %d", rs.Name, replicas, *newRS.Spec.Replicas)
}
}
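Taken together, the helpers above give the new tests a common skeleton, which the test functions later in this diff (for example `TestSpecReplicasChange`) follow. A condensed sketch meant to live in the same test file; the test name and namespace are illustrative, and `rmSetup` is the existing setup helper referenced throughout:

```go
func TestSomething(t *testing.T) {
	// Start an API server, the ReplicaSet controller, and the shared informers.
	s, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateTestingNamespace("test-something", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	stopCh := runControllerAndInformers(t, rm, informers, 0)
	defer close(stopCh)

	// Create a ReplicaSet, wait until .Status.Replicas matches .Spec.Replicas,
	// then drive it through the scenario under test.
	rs := newRS("rs", ns.Name, 2)
	rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
	rs = rss[0]
	waitRSStable(t, c, rs)

	scaleRS(t, c, rs, 3)
}
```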
@@ -229,251 +453,45 @@ func TestAdoption(t *testing.T) {
},
}
for i, tc := range testCases {
s, closeFn, rm, informers, clientSet := rmSetup(t)
defer closeFn()
podInformer := informers.Core().V1().Pods().Informer()
ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
func() {
s, closeFn, rm, informers, clientSet := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
podClient := clientSet.Core().Pods(ns.Name)
const rsName = "rs"
rs, err := rsClient.Create(newRS(rsName, ns.Name, 1))
if err != nil {
t.Fatalf("Failed to create replica set: %v", err)
}
podName := fmt.Sprintf("pod%d", i)
pod := newMatchingPod(podName, ns.Name)
pod.OwnerReferences = tc.existingOwnerReferences(rs)
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}

stopCh := make(chan struct{})
informers.Start(stopCh)
waitToObservePods(t, podInformer, 1)
go rm.Run(5, stopCh)
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
podClient := clientSet.Core().Pods(ns.Name)
const rsName = "rs"
rs, err := rsClient.Create(newRS(rsName, ns.Name, 1))
if err != nil {
return false, err
t.Fatalf("Failed to create replica set: %v", err)
}
if e, a := tc.expectedOwnerReferences(rs), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
return true, nil
} else {
t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
return false, nil
podName := fmt.Sprintf("pod%d", i)
pod := newMatchingPod(podName, ns.Name)
pod.OwnerReferences = tc.existingOwnerReferences(rs)
_, err = podClient.Create(pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
}); err != nil {
t.Fatalf("test %q failed: %v", tc.name, err)
}
close(stopCh)
}
}

func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.ReplicaSet, pods []*v1.Pod, ns string) {
rsClient := clientSet.Extensions().ReplicaSets(ns)
podClient := clientSet.Core().Pods(ns)
for _, rs := range rss {
if _, err := rsClient.Create(rs); err != nil {
t.Fatalf("Failed to create replica set %s: %v", rs.Name, err)
}
stopCh := runControllerAndInformers(t, rm, informers, 1)
defer close(stopCh)
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if e, a := tc.expectedOwnerReferences(rs), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
return true, nil
} else {
t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
return false, nil
}
}); err != nil {
t.Fatalf("test %q failed: %v", tc.name, err)
}
}()
}
for _, pod := range pods {
if _, err := podClient.Create(pod); err != nil {
t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
}
}
}

func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet, ns string) {
rsClient := clientSet.Extensions().ReplicaSets(ns)
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
updatedRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if updatedRS.Status.Replicas != *rs.Spec.Replicas {
return false, nil
} else {
return true, nil
}
}); err != nil {
t.Fatal(err)
}
}

func TestUpdateSelectorToAdopt(t *testing.T) {
// We have pod1, pod2 and rs. rs.spec.replicas=1. At first rs.Selector
// matches pod1 only; change the selector to match pod2 as well. Verify
// there is only one pod left.
s, closeFn, rm, informers, clientSet := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("rs-update-selector-to-adopt", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 1)
// let rs's selector only match pod1
rs.Spec.Selector.MatchLabels["uniqueKey"] = "1"
rs.Spec.Template.Labels["uniqueKey"] = "1"
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)

stopCh := make(chan struct{})
informers.Start(stopCh)
go rm.Run(5, stopCh)
waitRSStable(t, clientSet, rs, ns.Name)

// change the rs's selector to match both pods
patch := `{"spec":{"selector":{"matchLabels": {"uniqueKey":null}}}}`
rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
rs, err := rsClient.Patch(rs.Name, types.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch replica set: %v", err)
}
t.Logf("patched rs = %#v", rs)
// wait for the rs select both pods and delete one of them
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
}); err != nil {
t.Fatal(err)
}
close(stopCh)
}
func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
// We have pod1, pod2 and rs. rs.spec.replicas=2. At first rs.Selector
// matches pod1 and pod2; change the selector to match only pod1. Verify
// that rs creates one more pod, so there are 3 pods. Also verify that
// pod2's controllerRef is cleared.
s, closeFn, rm, informers, clientSet := rmSetup(t)
defer closeFn()
podInformer := informers.Core().V1().Pods().Informer()
ns := framework.CreateTestingNamespace("rs-update-selector-to-remove-controllerref", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 2)
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)

stopCh := make(chan struct{})
informers.Start(stopCh)
waitToObservePods(t, podInformer, 2)
go rm.Run(5, stopCh)
waitRSStable(t, clientSet, rs, ns.Name)

// change the rs's selector to match both pods
patch := `{"spec":{"selector":{"matchLabels": {"uniqueKey":"1"}},"template":{"metadata":{"labels":{"uniqueKey":"1"}}}}}`
rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
rs, err := rsClient.Patch(rs.Name, types.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch replica set: %v", err)
}
t.Logf("patched rs = %#v", rs)
// wait for the rs to create one more pod
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
}); err != nil {
t.Fatal(err)
}
podClient := clientSet.Core().Pods(ns.Name)
pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get pod2: %v", err)
}
if len(pod2.OwnerReferences) != 0 {
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
}
close(stopCh)
}

func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
// We have pod1, pod2 and rs. rs.spec.replicas=2. At first rs.Selector
// matches pod1 and pod2; change pod2's labels to non-matching. Verify
// that rs creates one more pod, so there are 3 pods. Also verify that
// pod2's controllerRef is cleared.
s, closeFn, rm, informers, clientSet := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("rs-update-label-to-remove-controllerref", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 2)
pod1 := newMatchingPod("pod1", ns.Name)
pod2 := newMatchingPod("pod2", ns.Name)
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)

stopCh := make(chan struct{})
informers.Start(stopCh)
go rm.Run(5, stopCh)
waitRSStable(t, clientSet, rs, ns.Name)

// change the rs's selector to match both pods
patch := `{"metadata":{"labels":{"name":null}}}`
podClient := clientSet.Core().Pods(ns.Name)
pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch pod2: %v", err)
}
t.Logf("patched pod2 = %#v", pod2)
// wait for the rs to create one more pod
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
}); err != nil {
t.Fatal(err)
}
pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get pod2: %v", err)
}
if len(pod2.OwnerReferences) != 0 {
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
}
close(stopCh)
}

func TestUpdateLabelToBeAdopted(t *testing.T) {
// We have pod1, pod2 and rs. rs.spec.replicas=1. At first rs.Selector
// matches pod1 only; change pod2's labels to be matching. Verify the RS
// controller adopts pod2 and delete one of them, so there is only 1 pod
// left.
s, closeFn, rm, informers, clientSet := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("rs-update-label-to-be-adopted", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 1)
// let rs's selector only matches pod1
rs.Spec.Selector.MatchLabels["uniqueKey"] = "1"
rs.Spec.Template.Labels["uniqueKey"] = "1"
pod1 := newMatchingPod("pod1", ns.Name)
pod1.Labels["uniqueKey"] = "1"
pod2 := newMatchingPod("pod2", ns.Name)
pod2.Labels["uniqueKey"] = "2"
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)

stopCh := make(chan struct{})
informers.Start(stopCh)
go rm.Run(5, stopCh)
waitRSStable(t, clientSet, rs, ns.Name)

// change the rs's selector to match both pods
patch := `{"metadata":{"labels":{"uniqueKey":"1"}}}`
podClient := clientSet.Core().Pods(ns.Name)
pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
if err != nil {
t.Fatalf("Failed to patch pod2: %v", err)
}
t.Logf("patched pod2 = %#v", pod2)
// wait for the rs to select both pods and delete one of them
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
}); err != nil {
t.Fatal(err)
}
close(stopCh)
}

// selectors are IMMUTABLE for all API versions except extensions/v1beta1
@@ -483,7 +501,7 @@ func TestRSSelectorImmutability(t *testing.T) {
ns := framework.CreateTestingNamespace("rs-selector-immutability", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rs := newRS("rs", ns.Name, 0)
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{}, ns.Name)
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})

// test to ensure extensions/v1beta1 selector is mutable
newSelectorLabels := map[string]string{"changed_name_extensions_v1beta1": "changed_test_extensions_v1beta1"}
@@ -515,3 +533,400 @@ func TestRSSelectorImmutability(t *testing.T) {
t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error())
}
}
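The hunk above is the tail of `TestRSSelectorImmutability`; the mutable half set up in the previous hunk amounts to changing the selector and template labels through the `extensions/v1beta1` client and expecting the update to succeed. A condensed sketch of that check, not the literal test body, reusing the `updateRS` helper and assuming `c`, `ns`, and `rs` from the surrounding test:

```go
// Within TestRSSelectorImmutability: extensions/v1beta1 still allows selector updates,
// so rewriting both the selector and the template labels should succeed.
newSelectorLabels := map[string]string{"changed_name_extensions_v1beta1": "changed_test_extensions_v1beta1"}
rsExt := updateRS(t, c.ExtensionsV1beta1().ReplicaSets(ns.Name), rs.Name, func(rs *v1beta1.ReplicaSet) {
	rs.Spec.Selector.MatchLabels = newSelectorLabels
	rs.Spec.Template.Labels = newSelectorLabels
})
if !reflect.DeepEqual(rsExt.Spec.Selector.MatchLabels, newSelectorLabels) {
	t.Errorf("selector of the extensions/v1beta1 ReplicaSet was not updated")
}
```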
func TestSpecReplicasChange(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-spec-replicas-change", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)

rs := newRS("rs", ns.Name, 2)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

// Update .Spec.Replicas and verify .Status.Replicas is changed accordingly
scaleRS(t, c, rs, 3)
scaleRS(t, c, rs, 0)
scaleRS(t, c, rs, 2)

// Add a template annotation change to test RS's status does update
// without .Spec.Replicas change
rsClient := c.Extensions().ReplicaSets(ns.Name)
var oldGeneration int64
newRS := updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
oldGeneration = rs.Generation
rs.Spec.Template.Annotations = map[string]string{"test": "annotation"}
})
savedGeneration := newRS.Generation
if savedGeneration == oldGeneration {
t.Fatalf("Failed to verify .Generation has incremented for rs %s", rs.Name)
}

if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return newRS.Status.ObservedGeneration >= savedGeneration, nil
}); err != nil {
t.Fatalf("Failed to verify .Status.ObservedGeneration has incremented for rs %s: %v", rs.Name, err)
}
}

func TestDeletingAndFailedPods(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-deleting-and-failed-pods", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)

rs := newRS("rs", ns.Name, 2)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

// Verify RS creates 2 pods
podClient := c.Core().Pods(ns.Name)
pods := getPods(t, podClient, labelMap())
if len(pods.Items) != 2 {
t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
}

// Set first pod as deleting pod
// Set finalizers for the pod to simulate pending deletion status
deletingPod := &pods.Items[0]
updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) {
pod.Finalizers = []string{"fake.example.com/blockDeletion"}
})
if err := c.Core().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil {
t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err)
}

// Set second pod as failed pod
failedPod := &pods.Items[1]
updatePodStatus(t, podClient, failedPod, func(pod *v1.Pod) {
pod.Status.Phase = v1.PodFailed
})

// Poll until 2 new pods have been created to replace deleting and failed pods
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
pods = getPods(t, podClient, labelMap())
return len(pods.Items) == 4, nil
}); err != nil {
t.Fatalf("Failed to verify 2 new pods have been created (expected 4 pods): %v", err)
}

// Verify deleting and failed pods are among the four pods
foundDeletingPod := false
foundFailedPod := false
for _, pod := range pods.Items {
if pod.UID == deletingPod.UID {
foundDeletingPod = true
}
if pod.UID == failedPod.UID {
foundFailedPod = true
}
}
// Verify deleting pod exists
if !foundDeletingPod {
t.Fatalf("expected deleting pod %s exists, but it is not found", deletingPod.Name)
}
// Verify failed pod exists
if !foundFailedPod {
t.Fatalf("expected failed pod %s exists, but it is not found", failedPod.Name)
}
}
func TestOverlappingRSs(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-overlapping-rss", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)

// Create 2 RSs with identical selectors
for i := 0; i < 2; i++ {
// One RS has 1 replica, and another has 2 replicas
rs := newRS(fmt.Sprintf("rs-%d", i+1), ns.Name, i+1)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
waitRSStable(t, c, rss[0])
}

// Expect 3 total Pods to be created
podClient := c.Core().Pods(ns.Name)
pods := getPods(t, podClient, labelMap())
if len(pods.Items) != 3 {
t.Errorf("len(pods) = %d, want 3", len(pods.Items))
}

// Expect both RSs have .status.replicas = .spec.replicas
for i := 0; i < 2; i++ {
newRS, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(fmt.Sprintf("rs-%d", i+1), metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to obtain rs rs-%d: %v", i+1, err)
}
if newRS.Status.Replicas != *newRS.Spec.Replicas {
t.Fatalf(".Status.Replicas %d is not equal to .Spec.Replicas %d", newRS.Status.Replicas, *newRS.Spec.Replicas)
}
}
}

func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-pod-orphaning-and-adoption-when-labels-change", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)

rs := newRS("rs", ns.Name, 1)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

// Orphaning: RS should remove OwnerReference from a pod when the pod's labels change to not match its labels
podClient := c.Core().Pods(ns.Name)
pods := getPods(t, podClient, labelMap())
if len(pods.Items) != 1 {
t.Fatalf("len(pods) = %d, want 1", len(pods.Items))
}
pod := &pods.Items[0]

// Start by verifying ControllerRef for the pod is not nil
if metav1.GetControllerOf(pod) == nil {
t.Fatalf("ControllerRef of pod %s is nil", pod.Name)
}
newLabelMap := map[string]string{"new-foo": "new-bar"}
updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
pod.Labels = newLabelMap
})
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
pod = newPod
return metav1.GetControllerOf(newPod) == nil, nil
}); err != nil {
t.Fatalf("Failed to verify ControllerRef for the pod %s is nil: %v", pod.Name, err)
}

// Adoption: RS should add ControllerRef to a pod when the pod's labels change to match its labels
updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
pod.Labels = labelMap()
})
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
newPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
// If the pod is not found, it means the RS picks the pod for deletion (it is extra)
// Verify there is only one pod in namespace and it has ControllerRef to the RS
if errors.IsNotFound(err) {
pods := getPods(t, podClient, labelMap())
if len(pods.Items) != 1 {
return false, fmt.Errorf("Expected 1 pod in current namespace, got %d", len(pods.Items))
}
// Set the pod accordingly
pod = &pods.Items[0]
return true, nil
}
return false, err
}
// Always update the pod so that we can save a GET call to API server later
pod = newPod
// If the pod is found, verify the pod has a ControllerRef
return metav1.GetControllerOf(newPod) != nil, nil
}); err != nil {
t.Fatalf("Failed to verify ControllerRef for pod %s is not nil: %v", pod.Name, err)
}
// Verify the pod has a ControllerRef to the RS
// Do nothing if the pod is nil (i.e., has been picked for deletion)
if pod != nil {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef.UID != rs.UID {
t.Fatalf("RS owner of the pod %s has a different UID: Expected %v, got %v", pod.Name, rs.UID, controllerRef.UID)
}
}
}

func TestGeneralPodAdoption(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-general-pod-adoption", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)

rs := newRS("rs", ns.Name, 1)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

podClient := c.Core().Pods(ns.Name)
pods := getPods(t, podClient, labelMap())
if len(pods.Items) != 1 {
t.Fatalf("len(pods) = %d, want 1", len(pods.Items))
}
pod := &pods.Items[0]
var falseVar = false

// When the only OwnerReference of the pod points to another type of API object such as statefulset
// with Controller=false, the RS should add a second OwnerReference (ControllerRef) pointing to itself
// with Controller=true
ownerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "apps/v1beta1", Kind: "StatefulSet", Name: rs.Name, Controller: &falseVar}
testPodControllerRefPatch(t, c, pod, &ownerReference, rs, 2)

// When the only OwnerReference of the pod points to the RS, but Controller=false
ownerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &falseVar}
testPodControllerRefPatch(t, c, pod, &ownerReference, rs, 1)
}

func TestReadyAndAvailableReplicas(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-ready-and-available-replicas", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)

rs := newRS("rs", ns.Name, 3)
rs.Spec.MinReadySeconds = 3600
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

// First verify no pod is available
if rs.Status.AvailableReplicas != 0 {
t.Fatalf("Unexpected .Status.AvailableReplicas: Expected 0, saw %d", rs.Status.AvailableReplicas)
}

podClient := c.Core().Pods(ns.Name)
pods := getPods(t, podClient, labelMap())
if len(pods.Items) != 3 {
t.Fatalf("len(pods) = %d, want 3", len(pods.Items))
}

// Separate 3 pods into their own list
firstPodList := &v1.PodList{Items: pods.Items[:1]}
secondPodList := &v1.PodList{Items: pods.Items[1:2]}
thirdPodList := &v1.PodList{Items: pods.Items[2:]}
// First pod: Running, but not Ready
// by setting the Ready condition to false with LastTransitionTime to be now
setPodsReadyCondition(t, c, firstPodList, v1.ConditionFalse, time.Now())
// Second pod: Running and Ready, but not Available
// by setting LastTransitionTime to now
setPodsReadyCondition(t, c, secondPodList, v1.ConditionTrue, time.Now())
// Third pod: Running, Ready, and Available
// by setting LastTransitionTime to more than 3600 seconds ago
setPodsReadyCondition(t, c, thirdPodList, v1.ConditionTrue, time.Now().Add(-120*time.Minute))

rsClient := c.Extensions().ReplicaSets(ns.Name)
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
// Verify 3 pods exist, 2 pods are Ready, and 1 pod is Available
return newRS.Status.Replicas == 3 && newRS.Status.ReadyReplicas == 2 && newRS.Status.AvailableReplicas == 1, nil
}); err != nil {
t.Fatalf("Failed to verify number of Replicas, ReadyReplicas and AvailableReplicas of rs %s to be as expected: %v", rs.Name, err)
}
}
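For reference, the ready-versus-available distinction exercised by `TestReadyAndAvailableReplicas` above comes down to how long a pod's Ready condition has held relative to `.Spec.MinReadySeconds`. A small sketch of that rule, using the same `podutil` calls as `setPodsReadyCondition` and assuming this file's imports; the function name is illustrative, not an API of the controller:

```go
// isAvailable reports whether a ready pod's Ready condition has held for at least
// minReadySeconds, mirroring why only the third pod above counts as available.
func isAvailable(pod *v1.Pod, minReadySeconds int32, now time.Time) bool {
	if !podutil.IsPodReady(pod) {
		return false
	}
	_, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
	if condition == nil {
		return false
	}
	readyFor := now.Sub(condition.LastTransitionTime.Time)
	return readyFor >= time.Duration(minReadySeconds)*time.Second
}
```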
func TestRSScaleSubresource(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-rs-scale-subresource", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)

rs := newRS("rs", ns.Name, 1)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

// Use scale subresource to scale up .Spec.Replicas to 3
testScalingUsingScaleSubresource(t, c, rs, 3)
// Use the scale subresource to scale down .Spec.Replicas to 0
testScalingUsingScaleSubresource(t, c, rs, 0)
}

func TestExtraPodsAdoptionAndDeletion(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-extra-pods-adoption-and-deletion", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

rs := newRS("rs", ns.Name, 2)
// Create 3 pods, RS should adopt only 2 of them
podList := []*v1.Pod{}
for i := 0; i < 3; i++ {
pod := newMatchingPod(fmt.Sprintf("pod-%d", i+1), ns.Name)
pod.Labels = labelMap()
podList = append(podList, pod)
}
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, podList)
rs = rss[0]
stopCh := runControllerAndInformers(t, rm, informers, 3)
defer close(stopCh)
waitRSStable(t, c, rs)

// Verify the extra pod is deleted eventually by determining whether number of
// all pods within namespace matches .spec.replicas of the RS (2 in this case)
podClient := c.Core().Pods(ns.Name)
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
// All pods have labelMap as their labels
pods := getPods(t, podClient, labelMap())
return int32(len(pods.Items)) == *rs.Spec.Replicas, nil
}); err != nil {
t.Fatalf("Failed to verify number of all pods within current namespace matches .spec.replicas of rs %s: %v", rs.Name, err)
}
}

func TestFullyLabeledReplicas(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-fully-labeled-replicas", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)

extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
rs := newRS("rs", ns.Name, 2)
rss, _ := createRSsPods(t, c, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)

// Change RS's template labels to have extra labels, but not its selector
rsClient := c.Extensions().ReplicaSets(ns.Name)
updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) {
rs.Spec.Template.Labels = extraLabelMap
})

// Set one of the pods to have extra labels
podClient := c.Core().Pods(ns.Name)
pods := getPods(t, podClient, labelMap())
if len(pods.Items) != 2 {
t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
}
fullyLabeledPod := &pods.Items[0]
updatePod(t, podClient, fullyLabeledPod.Name, func(pod *v1.Pod) {
pod.Labels = extraLabelMap
})

// Verify only one pod is fully labeled
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return (newRS.Status.Replicas == 2 && newRS.Status.FullyLabeledReplicas == 1), nil
}); err != nil {
t.Fatalf("Failed to verify only one pod is fully labeled: %v", err)
}
}