/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemonset

import (
	"fmt"
	"net/http/httptest"
	"testing"
	"time"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	appstyped "k8s.io/client-go/kubernetes/typed/apps/v1"
	corev1typed "k8s.io/client-go/kubernetes/typed/core/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller/daemon"
	"k8s.io/kubernetes/pkg/util/metrics"
	"k8s.io/kubernetes/test/integration/framework"
)

// zero is shared as the TerminationGracePeriodSeconds of all test pods so
// that deleted daemon pods go away immediately.
var zero = int64(0)
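
// setup starts an in-process API server, builds a DaemonSets controller wired
// to a shared informer factory, and returns the pieces each test needs.
// Callers must invoke the returned CloseFunc when they are done with the server.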
func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, server, closeFn := framework.RunAMaster(masterConfig)

	config := restclient.Config{Host: server.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("Error in creating clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-informers")), resyncPeriod)
	metrics.UnregisterMetricAndUntrackRateLimiterUsage("daemon_controller")
	dc, err := daemon.NewDaemonSetsController(
		informers.Apps().V1().DaemonSets(),
		informers.Apps().V1().ControllerRevisions(),
		informers.Core().V1().Pods(),
		informers.Core().V1().Nodes(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-controller")),
	)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}

	return server, closeFn, dc, informers, clientSet
}

func testLabels() map[string]string {
	return map[string]string{"name": "test"}
}
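
// newDaemonSet returns an apps/v1 DaemonSet named name in the given namespace,
// selecting the test labels and defaulting to the OnDelete update strategy.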
func newDaemonSet(name, namespace string) *apps.DaemonSet {
	two := int32(2)
	return &apps.DaemonSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "DaemonSet",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Spec: apps.DaemonSetSpec{
			RevisionHistoryLimit: &two,
			Selector:             &metav1.LabelSelector{MatchLabels: testLabels()},
			UpdateStrategy: apps.DaemonSetUpdateStrategy{
				Type: apps.OnDeleteDaemonSetStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: testLabels(),
				},
				Spec: v1.PodSpec{
					Containers:                    []v1.Container{{Name: "foo", Image: "bar"}},
					TerminationGracePeriodSeconds: &zero,
				},
			},
		},
	}
}
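
// cleanupDaemonSets scales a DaemonSet down to zero daemon pods by pointing
// its nodeSelector at a label no node carries, waits for the pods to be
// deleted, and then deletes the DaemonSet itself.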
func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) {
	ds, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Failed to get DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
		return
	}

	// We set the nodeSelector to a random label. This label is nearly guaranteed
	// to not be set on any node so the DaemonSetController will start deleting
	// daemon pods. Once it's done deleting the daemon pods, it's safe to delete
	// the DaemonSet.
	ds.Spec.Template.Spec.NodeSelector = map[string]string{
		string(uuid.NewUUID()): string(uuid.NewUUID()),
	}
	// force update to avoid version conflict
	ds.ResourceVersion = ""

	if ds, err = cs.AppsV1().DaemonSets(ds.Namespace).Update(ds); err != nil {
		t.Errorf("Failed to update DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
		return
	}

	// Wait for the daemon set controller to kill all the daemon pods.
	if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		updatedDS, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
	}); err != nil {
		t.Errorf("Failed to kill the pods of DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
		return
	}

	falseVar := false
	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
	if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions); err != nil {
		t.Errorf("Failed to delete DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
	}
}

func newRollbackStrategy() *apps.DaemonSetUpdateStrategy {
	one := intstr.FromInt(1)
	return &apps.DaemonSetUpdateStrategy{
		Type:          apps.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
	}
}

func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {
	return &apps.DaemonSetUpdateStrategy{
		Type: apps.OnDeleteDaemonSetStrategyType,
	}
}
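
// updateStrategies returns the two update strategies (OnDelete and
// RollingUpdate) that every test case is run against.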
func updateStrategies() []*apps.DaemonSetUpdateStrategy {
	return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
}
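
// allocatableResources builds a node allocatable ResourceList with the given
// memory and CPU plus room for 100 pods.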
func allocatableResources(memory, cpu string) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceMemory: resource.MustParse(memory),
		v1.ResourceCPU:    resource.MustParse(cpu),
		v1.ResourcePods:   resource.MustParse("100"),
	}
}
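
// resourcePodSpec returns a pod spec pinned to nodeName that requests the
// given amount of memory and CPU.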
func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
	return v1.PodSpec{
		NodeName: nodeName,
		Containers: []v1.Container{
			{
				Name:  "foo",
				Image: "bar",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceMemory: resource.MustParse(memory),
						v1.ResourceCPU:    resource.MustParse(cpu),
					},
				},
			},
		},
		TerminationGracePeriodSeconds: &zero,
	}
}
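
// newNode returns a Ready node with the given name and labels and capacity
// for 100 pods.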
func newNode(name string, label map[string]string) *v1.Node {
	return &v1.Node{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Node",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Labels:    label,
			Namespace: metav1.NamespaceDefault,
		},
		Status: v1.NodeStatus{
			Conditions:  []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
			Allocatable: v1.ResourceList{v1.ResourcePods: resource.MustParse("100")},
		},
	}
}
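
// addNodes creates numNodes nodes named node-<startIndex> through
// node-<startIndex+numNodes-1>, all carrying the given labels.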
func addNodes(nodeClient corev1typed.NodeInterface, startIndex, numNodes int, label map[string]string, t *testing.T) {
	for i := startIndex; i < startIndex+numNodes; i++ {
		_, err := nodeClient.Create(newNode(fmt.Sprintf("node-%d", i), label))
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}
	}
}
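
// validateDaemonSetPodsAndMarkReady waits until the pod informer has seen
// exactly numberPods daemon pods, checks that each pod is controlled by a
// DaemonSet, and marks any pod that is not yet ready as Running and Ready.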
func validateDaemonSetPodsAndMarkReady(
	podClient corev1typed.PodInterface,
	podInformer cache.SharedIndexInformer,
	numberPods int,
	t *testing.T) {
	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		if len(objects) != numberPods {
			return false, nil
		}

		for _, object := range objects {
			pod := object.(*v1.Pod)

			ownerReferences := pod.ObjectMeta.OwnerReferences
			if len(ownerReferences) != 1 {
				return false, fmt.Errorf("Pod %s has %d OwnerReferences, expected only 1", pod.Name, len(ownerReferences))
			}
			controllerRef := ownerReferences[0]
			if got, want := controllerRef.Kind, "DaemonSet"; got != want {
				t.Errorf("controllerRef.Kind = %q, want %q", got, want)
			}
			if controllerRef.Controller == nil || *controllerRef.Controller != true {
				t.Errorf("controllerRef.Controller is not set to true")
			}

			if !podutil.IsPodReady(pod) {
				podCopy := pod.DeepCopy()
				podCopy.Status = v1.PodStatus{
					Phase:      v1.PodRunning,
					Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
				}
				_, err := podClient.UpdateStatus(podCopy)
				if err != nil {
					return false, err
				}
			}
		}

		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}
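
// validateDaemonSetStatus polls the DaemonSet until its NumberReady status
// matches expectedNumberReady.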
func validateDaemonSetStatus(
	dsClient appstyped.DaemonSetInterface,
	dsName string,
	expectedNumberReady int32,
	t *testing.T) {
	if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		ds, err := dsClient.Get(dsName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return ds.Status.NumberReady == expectedNumberReady, nil
	}); err != nil {
		t.Fatal(err)
	}
}
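
// validateFailedPlacementEvent waits for a single warning event whose reason
// is FailedPlacement, which the controller emits when a daemon pod cannot be
// placed on a node.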
func validateFailedPlacementEvent(eventClient corev1typed.EventInterface, t *testing.T) {
	if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		eventList, err := eventClient.List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		if len(eventList.Items) == 0 {
			return false, nil
		}
		if len(eventList.Items) > 1 {
			t.Errorf("Expected 1 event got %d", len(eventList.Items))
		}
		event := eventList.Items[0]
		if event.Type != v1.EventTypeWarning {
			t.Errorf("Event type expected %s got %s", v1.EventTypeWarning, event.Type)
		}
		if event.Reason != daemon.FailedPlacementReason {
			t.Errorf("Event reason expected %s got %s", daemon.FailedPlacementReason, event.Reason)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}
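
// forEachStrategy runs the given test body as a subtest once per update
// strategy returned by updateStrategies.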
func forEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy)) {
	for _, strategy := range updateStrategies() {
		t.Run(fmt.Sprintf("%s (%v)", t.Name(), strategy),
			func(tt *testing.T) { tf(tt, strategy) })
	}
}
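
// TestOneNodeDaemonLaunchesPod verifies that a DaemonSet schedules exactly one
// daemon pod on a single ready node.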
func TestOneNodeDaemonLaunchesPod(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		server, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
		defer framework.DeleteTestingNamespace(ns, server, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()

		stopCh := make(chan struct{})
		informers.Start(stopCh)
		go dc.Run(5, stopCh)
		defer close(stopCh)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ds)
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)

		_, err = nodeClient.Create(newNode("single-node", nil))
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
		validateDaemonSetStatus(dsClient, ds.Name, 1, t)
	})
}
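
// TestSimpleDaemonSetLaunchesPods verifies that a DaemonSet places one daemon
// pod on each of five nodes.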
func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		server, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
		defer framework.DeleteTestingNamespace(ns, server, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()

		stopCh := make(chan struct{})
		informers.Start(stopCh)
		go dc.Run(5, stopCh)
		defer close(stopCh)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ds)
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)

		addNodes(nodeClient, 0, 5, nil, t)

		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 5, t)
		validateDaemonSetStatus(dsClient, ds.Name, 5, t)
	})
}
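
// TestNotReadyNodeDaemonDoesLaunchPod verifies that a daemon pod is still
// launched on a node whose Ready condition is false.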
func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		server, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
		defer framework.DeleteTestingNamespace(ns, server, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()

		stopCh := make(chan struct{})
		informers.Start(stopCh)
		go dc.Run(5, stopCh)
		defer close(stopCh)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ds)
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)

		node := newNode("single-node", nil)
		node.Status.Conditions = []v1.NodeCondition{
			{Type: v1.NodeReady, Status: v1.ConditionFalse},
		}
		_, err = nodeClient.Create(node)
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
		validateDaemonSetStatus(dsClient, ds.Name, 1, t)
	})
}
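
// TestInsufficientCapacityNodeDaemonDoesNotLaunchPod verifies that no daemon
// pod is created on a node that cannot satisfy the pod's resource requests,
// and that a FailedPlacement warning event is emitted instead.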
func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		server, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateTestingNamespace("insufficient-capacity", server, t)
		defer framework.DeleteTestingNamespace(ns, server, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		eventClient := clientset.CoreV1().Events(ns.Namespace)

		stopCh := make(chan struct{})
		informers.Start(stopCh)
		go dc.Run(5, stopCh)
		defer close(stopCh)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.Template.Spec = resourcePodSpec("node-with-limited-memory", "120M", "75m")
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ds)
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)

		node := newNode("node-with-limited-memory", nil)
		node.Status.Allocatable = allocatableResources("100M", "200m")
		_, err = nodeClient.Create(node)
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		validateFailedPlacementEvent(eventClient, t)
	})
}