/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"
	"reflect"
	goruntime "runtime"
	"sort"
	"strconv"
	"testing"
	"time"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
	"k8s.io/kubernetes/pkg/api"
	apierrors "k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/util/diff"
	"k8s.io/kubernetes/pkg/util/rand"
	"k8s.io/kubernetes/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/pkg/version"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

const (
	maxImageTagsForTest = 20
)

// generateTestingImageList generates a random image list and the
// corresponding expectedImageList.
func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) {
	// imageList is a randomly generated image list.
	var imageList []kubecontainer.Image
	for ; count > 0; count-- {
		imageItem := kubecontainer.Image{
			ID:       string(uuid.NewUUID()),
			RepoTags: generateImageTags(),
			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
		}
		imageList = append(imageList, imageItem)
	}

	// expectedImageList is derived from imageList according to size and maxImagesInNodeStatus:
	// 1. sort the imageList by size
	sort.Sort(sliceutils.ByImageSize(imageList))
	// 2. convert the sorted imageList to a v1.ContainerImage list
	var expectedImageList []v1.ContainerImage
	for _, kubeImage := range imageList {
		apiImage := v1.ContainerImage{
			Names:     kubeImage.RepoTags[0:maxNamesPerImageInNodeStatus],
			SizeBytes: kubeImage.Size,
		}

		expectedImageList = append(expectedImageList, apiImage)
	}
	// 3. return only the top maxImagesInNodeStatus images in expectedImageList
	return imageList, expectedImageList[0:maxImagesInNodeStatus]
}
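
// generateImageTags generates a random number of image tags, always more than
// maxNamesPerImageInNodeStatus so that tag truncation can be exercised.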
func generateImageTags() []string {
	var tagList []string
	// Generate > maxNamesPerImageInNodeStatus tags so that the test can verify
	// that the kubelet reports up to maxNamesPerImageInNodeStatus tags.
	count := rand.IntnRange(maxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
	for ; count > 0; count-- {
		tagList = append(tagList, "gcr.io/google_containers:v"+strconv.Itoa(count))
	}
	return tagList
}
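
// TestUpdateNewNodeStatus verifies that the first status update for a freshly
// registered node populates machine info, conditions, addresses, and a
// truncated image list.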
func TestUpdateNewNodeStatus(t *testing.T) {
	// Generate one more image than maxImagesInNodeStatus in inputImageList.
	inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
	testKubelet := newTestKubeletWithImageList(
		t, inputImageList, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
		{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}},
	}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9, // 10G
	}
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make the kubelet report that it has sufficient disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	expectedNode := &v1.Node{
		ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasNoDiskPressure",
					Message:            fmt.Sprintf("kubelet has no disk pressure"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(9900E6, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			Images: expectedImageList,
		},
	}

	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Fatalf("unexpected actions: %v", actions)
	}
	if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
		t.Fatalf("unexpected actions: %v", actions)
	}
	updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*v1.Node)
	if !ok {
		t.Errorf("unexpected object type")
	}
	for i, cond := range updatedNode.Status.Conditions {
		if cond.LastHeartbeatTime.IsZero() {
			t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
		}
		if cond.LastTransitionTime.IsZero() {
			t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
		}
		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
	}

	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
		t.Errorf("unexpected node condition order. NodeReady should be last.")
	}

	if maxImagesInNodeStatus != len(updatedNode.Status.Images) {
		t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images))
	} else {
		if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
			t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
		}
	}
}
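
// TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency verifies the
// NodeOutOfDisk condition reported by a new node when an out-of-disk
// transition frequency is configured.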
func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
		{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}},
	}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make the kubelet report that it has sufficient disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	kubelet.outOfDiskTransitionFrequency = 10 * time.Second

	expectedNodeOutOfDiskCondition := v1.NodeCondition{
		Type:               v1.NodeOutOfDisk,
		Status:             v1.ConditionFalse,
		Reason:             "KubeletHasSufficientDisk",
		Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
		LastHeartbeatTime:  metav1.Time{},
		LastTransitionTime: metav1.Time{},
	}

	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Fatalf("unexpected actions: %v", actions)
	}
	if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
		t.Fatalf("unexpected actions: %v", actions)
	}
	updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*v1.Node)
	if !ok {
		t.Errorf("unexpected object type")
	}

	var oodCondition v1.NodeCondition
	for i, cond := range updatedNode.Status.Conditions {
		if cond.LastHeartbeatTime.IsZero() {
			t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
		}
		if cond.LastTransitionTime.IsZero() {
			t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
		}
		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
		if cond.Type == v1.NodeOutOfDisk {
			oodCondition = updatedNode.Status.Conditions[i]
		}
	}

	if !reflect.DeepEqual(expectedNodeOutOfDiskCondition, oodCondition) {
		t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNodeOutOfDiskCondition, oodCondition))
	}
}
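
// TestUpdateExistingNodeStatus verifies that updating an existing node
// refreshes LastHeartbeatTime while preserving LastTransitionTime.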
func TestUpdateExistingNodeStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
		{
			ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
			Spec:       v1.NodeSpec{},
			Status: v1.NodeStatus{
				Conditions: []v1.NodeCondition{
					{
						Type:               v1.NodeOutOfDisk,
						Status:             v1.ConditionTrue,
						Reason:             "KubeletOutOfDisk",
						Message:            "out of disk space",
						LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					{
						Type:               v1.NodeMemoryPressure,
						Status:             v1.ConditionFalse,
						Reason:             "KubeletHasSufficientMemory",
						Message:            fmt.Sprintf("kubelet has sufficient memory available"),
						LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					{
						Type:               v1.NodeDiskPressure,
						Status:             v1.ConditionFalse,
						Reason:             "KubeletHasSufficientDisk",
						Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
						LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					{
						Type:               v1.NodeReady,
						Status:             v1.ConditionTrue,
						Reason:             "KubeletReady",
						Message:            fmt.Sprintf("kubelet is posting ready status"),
						LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
				},
				Capacity: v1.ResourceList{
					v1.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
					v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
				},
				Allocatable: v1.ResourceList{
					v1.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
					v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
				},
			},
		},
	}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 20E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make the kubelet report that it is out of disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	expectedNode := &v1.Node{
		ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletOutOfDisk",
					Message:            "out of disk space",
					LastHeartbeatTime:  metav1.Time{}, // placeholder
					LastTransitionTime: metav1.Time{}, // placeholder
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  metav1.Time{}, // placeholder
					LastTransitionTime: metav1.Time{}, // placeholder
				},
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			// images will be sorted from max to min in node status.
			Images: []v1.ContainerImage{
				{
					Names:     []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
					SizeBytes: 456,
				},
				{
					Names:     []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
					SizeBytes: 123,
				},
			},
		},
	}

	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Errorf("unexpected actions: %v", actions)
	}
	updateAction, ok := actions[1].(core.UpdateAction)
	if !ok {
		t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
	}
	updatedNode, ok := updateAction.GetObject().(*v1.Node)
	if !ok {
		t.Errorf("unexpected object type")
	}
	for i, cond := range updatedNode.Status.Conditions {
		// Expect LastHeartbeatTime to be updated to now, while LastTransitionTime should stay the same.
		if old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) {
			t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, metav1.Now(), old)
		}
		if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) {
			t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got)
		}
		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
	}

	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
		t.Errorf("unexpected node condition order. NodeReady should be last.")
	}

	if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
		t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
	}
}
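
// TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency verifies that
// the NodeOutOfDisk condition only transitions back to false once the
// configured outOfDiskTransitionFrequency has elapsed.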
func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	clock := testKubelet.fakeClock
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
		{
			ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
			Spec:       v1.NodeSpec{},
			Status: v1.NodeStatus{
				Conditions: []v1.NodeCondition{
					{
						Type:               v1.NodeReady,
						Status:             v1.ConditionTrue,
						Reason:             "KubeletReady",
						Message:            fmt.Sprintf("kubelet is posting ready status"),
						LastHeartbeatTime:  metav1.NewTime(clock.Now()),
						LastTransitionTime: metav1.NewTime(clock.Now()),
					},
					{
						Type:               v1.NodeOutOfDisk,
						Status:             v1.ConditionTrue,
						Reason:             "KubeletOutOfDisk",
						Message:            "out of disk space",
						LastHeartbeatTime:  metav1.NewTime(clock.Now()),
						LastTransitionTime: metav1.NewTime(clock.Now()),
					},
				},
			},
		},
	}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	fsInfo := cadvisorapiv2.FsInfo{
		Device: "123",
	}
	mockCadvisor.On("Start").Return(nil)
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	mockCadvisor.On("ImagesFsInfo").Return(fsInfo, nil)
	mockCadvisor.On("RootFsInfo").Return(fsInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
		DockerVersion:      "1.5.0",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	kubelet.outOfDiskTransitionFrequency = 5 * time.Second

	ood := v1.NodeCondition{
		Type:               v1.NodeOutOfDisk,
		Status:             v1.ConditionTrue,
		Reason:             "KubeletOutOfDisk",
		Message:            "out of disk space",
		LastHeartbeatTime:  metav1.NewTime(clock.Now()), // placeholder
		LastTransitionTime: metav1.NewTime(clock.Now()), // placeholder
	}
	noOod := v1.NodeCondition{
		Type:               v1.NodeOutOfDisk,
		Status:             v1.ConditionFalse,
		Reason:             "KubeletHasSufficientDisk",
		Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
		LastHeartbeatTime:  metav1.NewTime(clock.Now()), // placeholder
		LastTransitionTime: metav1.NewTime(clock.Now()), // placeholder
	}

	testCases := []struct {
		rootFsAvail   uint64
		dockerFsAvail uint64
		expected      v1.NodeCondition
	}{
		{
			// NodeOutOfDisk==false
			rootFsAvail:   200,
			dockerFsAvail: 200,
			expected:      ood,
		},
		{
			// NodeOutOfDisk==true
			rootFsAvail:   50,
			dockerFsAvail: 200,
			expected:      ood,
		},
		{
			// NodeOutOfDisk==false
			rootFsAvail:   200,
			dockerFsAvail: 200,
			expected:      ood,
		},
		{
			// NodeOutOfDisk==true
			rootFsAvail:   200,
			dockerFsAvail: 50,
			expected:      ood,
		},
		{
			// NodeOutOfDisk==false
			rootFsAvail:   200,
			dockerFsAvail: 200,
			expected:      noOod,
		},
	}

	kubelet.updateRuntimeUp()
	for tcIdx, tc := range testCases {
		// Step by a second
		clock.Step(1 * time.Second)

		// Setup expected times.
		tc.expected.LastHeartbeatTime = metav1.NewTime(clock.Now())
		// In the last case, there should be a status transition for NodeOutOfDisk
		if tcIdx == len(testCases)-1 {
			tc.expected.LastTransitionTime = metav1.NewTime(clock.Now())
		}

		// Make the kubelet report the disk space available in this test case.
		if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100); err != nil {
			t.Fatalf("can't update disk space manager: %v", err)
		}

		if err := kubelet.updateNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		actions := kubeClient.Actions()
		if len(actions) != 2 {
			t.Errorf("%d. unexpected actions: %v", tcIdx, actions)
		}
		updateAction, ok := actions[1].(core.UpdateAction)
		if !ok {
			t.Errorf("%d. unexpected action type. expected UpdateAction, got %#v", tcIdx, actions[1])
		}
		updatedNode, ok := updateAction.GetObject().(*v1.Node)
		if !ok {
			t.Errorf("%d. unexpected object type", tcIdx)
		}
		kubeClient.ClearActions()

		var oodCondition v1.NodeCondition
		for i, cond := range updatedNode.Status.Conditions {
			if cond.Type == v1.NodeOutOfDisk {
				oodCondition = updatedNode.Status.Conditions[i]
			}
		}

		if !reflect.DeepEqual(tc.expected, oodCondition) {
			t.Errorf("%d.\nunexpected objects: %s", tcIdx, diff.ObjectDiff(tc.expected, oodCondition))
		}
	}
}
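
// TestUpdateNodeStatusWithRuntimeStateError verifies that the NodeReady
// condition tracks runtime health: stale runtime checks, runtime status
// errors, and unready CRI conditions all mark the node NotReady.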
func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	clock := testKubelet.fakeClock
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
		{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}},
	}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

	// Make the kubelet report that it has sufficient disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}

	expectedNode := &v1.Node{
		ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            "kubelet has sufficient disk space available",
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasNoDiskPressure",
					Message:            fmt.Sprintf("kubelet has no disk pressure"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{}, // placeholder
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:    *resource.NewQuantity(9900E6, resource.BinarySI),
				v1.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			Images: []v1.ContainerImage{
				{
					Names:     []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
					SizeBytes: 456,
				},
				{
					Names:     []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
					SizeBytes: 123,
				},
			},
		},
	}

	checkNodeStatus := func(status v1.ConditionStatus, reason string) {
		kubeClient.ClearActions()
		if err := kubelet.updateNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		actions := kubeClient.Actions()
		if len(actions) != 2 {
			t.Fatalf("unexpected actions: %v", actions)
		}
		if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
			t.Fatalf("unexpected actions: %v", actions)
		}
		updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*v1.Node)
		if !ok {
			t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
		}

		for i, cond := range updatedNode.Status.Conditions {
			if cond.LastHeartbeatTime.IsZero() {
				t.Errorf("unexpected zero last probe timestamp")
			}
			if cond.LastTransitionTime.IsZero() {
				t.Errorf("unexpected zero last transition timestamp")
			}
			updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
			updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
		}

		// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
		lastIndex := len(updatedNode.Status.Conditions) - 1
		if updatedNode.Status.Conditions[lastIndex].Type != v1.NodeReady {
			t.Errorf("unexpected node condition order. NodeReady should be last.")
		}
		if updatedNode.Status.Conditions[lastIndex].Message == "" {
			t.Errorf("unexpected empty condition message")
		}
		updatedNode.Status.Conditions[lastIndex].Message = ""
		expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
			Type:               v1.NodeReady,
			Status:             status,
			Reason:             reason,
			LastHeartbeatTime:  metav1.Time{},
			LastTransitionTime: metav1.Time{},
		}
		if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
			t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
		}
	}

	// TODO(random-liu): Refactor the unit test to be table driven test.
	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report kubelet ready if the runtime check is updated
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionTrue, "KubeletReady")

	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report kubelet not ready if the runtime check failed
	fakeRuntime := testKubelet.fakeRuntime
	// Inject error into fake runtime status check, node should be NotReady
	fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Test CRI integration.
	kubelet.kubeletConfiguration.EnableCRI = true
	fakeRuntime.StatusErr = nil

	// Should report node not ready if runtime status is nil.
	fakeRuntime.RuntimeStatus = nil
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if runtime status is empty.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if RuntimeReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: false},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node ready if RuntimeReady is true.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionTrue, "KubeletReady")

	// Should report node not ready if NetworkReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: false},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
}
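
// TestUpdateNodeStatusError verifies that updateNodeStatus returns an error
// after nodeStatusUpdateRetry attempts when no matching node object exists.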
func TestUpdateNodeStatusError(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	// No matching node for the kubelet
	testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain

	if err := kubelet.updateNodeStatus(); err == nil {
		t.Errorf("unexpected non error: %v", err)
	}
	if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
		t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
	}
}
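
// TestRegisterWithApiServer verifies that registration completes (rather than
// blocking forever) when the node already exists on the API server.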
func TestRegisterWithApiServer(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		// Return an error on create.
		return true, &v1.Node{}, &apierrors.StatusError{
			ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
		}
	})
	kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		// Return an existing (matching) node on get.
		return true, &v1.Node{
			ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
			Spec:       v1.NodeSpec{ExternalID: testKubeletHostname},
		}, nil
	})
	kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	})
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
		DockerVersion:      "1.5.0",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400 * mb,
		Capacity:  1000 * mb,
		Available: 600 * mb,
	}, nil)
	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9 * mb,
		Capacity: 10 * mb,
	}, nil)

	done := make(chan struct{})
	go func() {
		kubelet.registerWithApiServer()
		done <- struct{}{}
	}()
	select {
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("timed out waiting for registration")
	case <-done:
		return
	}
}
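
// TestTryRegisterWithApiServer exercises tryRegisterWithApiServer against
// create/get/update/delete failures and checks how the controller-managed
// attach-detach annotation is carried over to the saved node.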
func TestTryRegisterWithApiServer(t *testing.T) {
	alreadyExists := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
	}

	conflict := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict},
	}

	newNode := func(cmad bool, externalID string) *v1.Node {
		node := &v1.Node{
			ObjectMeta: v1.ObjectMeta{},
			Spec: v1.NodeSpec{
				ExternalID: externalID,
			},
		}

		if cmad {
			node.Annotations = make(map[string]string)
			node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true"
		}

		return node
	}

	cases := []struct {
		name            string
		newNode         *v1.Node
		existingNode    *v1.Node
		createError     error
		getError        error
		updateError     error
		deleteError     error
		expectedResult  bool
		expectedActions int
		testSavedNode   bool
		savedNodeIndex  int
		savedNodeCMAD   bool
	}{
		{
			name:            "success case - new node",
			newNode:         &v1.Node{},
			expectedResult:  true,
			expectedActions: 1,
		},
		{
			name:            "success case - existing node - no change in CMAD",
			newNode:         newNode(true, "a"),
			createError:     alreadyExists,
			existingNode:    newNode(true, "a"),
			expectedResult:  true,
			expectedActions: 2,
		},
		{
			name:            "success case - existing node - CMAD disabled",
			newNode:         newNode(false, "a"),
			createError:     alreadyExists,
			existingNode:    newNode(true, "a"),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   false,
		},
		{
			name:            "success case - existing node - CMAD enabled",
			newNode:         newNode(true, "a"),
			createError:     alreadyExists,
			existingNode:    newNode(false, "a"),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   true,
		},
		{
			name:            "success case - external ID changed",
			newNode:         newNode(false, "b"),
			createError:     alreadyExists,
			existingNode:    newNode(false, "a"),
			expectedResult:  false,
			expectedActions: 3,
		},
		{
			name:            "create failed",
			newNode:         newNode(false, "b"),
			createError:     conflict,
			expectedResult:  false,
			expectedActions: 1,
		},
		{
			name:            "get existing node failed",
			newNode:         newNode(false, "a"),
			createError:     alreadyExists,
			getError:        conflict,
			expectedResult:  false,
			expectedActions: 2,
		},
		{
			name:            "update existing node failed",
			newNode:         newNode(false, "a"),
			createError:     alreadyExists,
			existingNode:    newNode(true, "a"),
			updateError:     conflict,
			expectedResult:  false,
			expectedActions: 3,
		},
		{
			name:            "delete existing node failed",
			newNode:         newNode(false, "b"),
			createError:     alreadyExists,
			existingNode:    newNode(false, "a"),
			deleteError:     conflict,
			expectedResult:  false,
			expectedActions: 3,
		},
	}

	notImplemented := func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	}

	for _, tc := range cases {
		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
		kubelet := testKubelet.kubelet
		kubeClient := testKubelet.fakeKubeClient

		kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, nil, tc.createError
		})
		kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			// Return an existing (matching) node on get.
			return true, tc.existingNode, tc.getError
		})
		kubeClient.AddReactor("update", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			if action.GetSubresource() == "status" {
				return true, nil, tc.updateError
			}
			return notImplemented(action)
		})
		kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, nil, tc.deleteError
		})
		kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
			return notImplemented(action)
		})

		result := kubelet.tryRegisterWithApiServer(tc.newNode)
		if e, a := tc.expectedResult, result; e != a {
			t.Errorf("%v: unexpected result; expected %v got %v", tc.name, e, a)
			continue
		}

		actions := kubeClient.Actions()
		if e, a := tc.expectedActions, len(actions); e != a {
			t.Errorf("%v: unexpected number of actions, expected %v, got %v", tc.name, e, a)
		}

		if tc.testSavedNode {
			var savedNode *v1.Node
			var ok bool

			t.Logf("actions: %v: %+v", len(actions), actions)
			action := actions[tc.savedNodeIndex]
			if action.GetVerb() == "create" {
				createAction := action.(core.CreateAction)
				savedNode, ok = createAction.GetObject().(*v1.Node)
				if !ok {
					t.Errorf("%v: unexpected type; couldn't convert to *v1.Node: %+v", tc.name, createAction.GetObject())
					continue
				}
			} else if action.GetVerb() == "update" {
				updateAction := action.(core.UpdateAction)
				savedNode, ok = updateAction.GetObject().(*v1.Node)
				if !ok {
					t.Errorf("%v: unexpected type; couldn't convert to *v1.Node: %+v", tc.name, updateAction.GetObject())
					continue
				}
			}

			actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[volumehelper.ControllerManagedAttachAnnotation])
			if e, a := tc.savedNodeCMAD, actualCMAD; e != a {
				t.Errorf("%v: unexpected attach-detach value on saved node; expected %v got %v", tc.name, e, a)
			}
		}
	}
}