/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"encoding/json"
	"fmt"
	"net"
	goruntime "runtime"
	"sort"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
	"k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/diff"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes/fake"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	core "k8s.io/client-go/testing"
	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
	"k8s.io/kubernetes/pkg/version"
	"k8s.io/kubernetes/pkg/volume/util"
)

const (
	maxImageTagsForTest = 20
)

// generateTestingImageLists generates a random image list and the corresponding expectedImageList.
func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
	// imageList is randomly generated image list
	var imageList []kubecontainer.Image
	for ; count > 0; count-- {
		imageItem := kubecontainer.Image{
			ID:       string(uuid.NewUUID()),
			RepoTags: generateImageTags(),
			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
		}
		imageList = append(imageList, imageItem)
	}

	expectedImageList := makeExpectedImageList(imageList, maxImages)
	return imageList, expectedImageList
}
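
// makeExpectedImageList converts imageList into the []v1.ContainerImage the
// kubelet is expected to report: sorted by size and truncated to maxImages
// entries (or left unlimited when maxImages is -1).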
func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
	// expectedImageList is generated by imageList according to size and maxImages
	// 1. sort the imageList by size
	sort.Sort(sliceutils.ByImageSize(imageList))
	// 2. convert sorted imageList to v1.ContainerImage list
	var expectedImageList []v1.ContainerImage
	for _, kubeImage := range imageList {
		apiImage := v1.ContainerImage{
			Names:     kubeImage.RepoTags[0:maxNamesPerImageInNodeStatus],
			SizeBytes: kubeImage.Size,
		}

		expectedImageList = append(expectedImageList, apiImage)
	}
	// 3. only return the top maxImages images in expectedImageList
	if maxImages == -1 { // -1 means no limit
		return expectedImageList
	}
	return expectedImageList[0:maxImages]
}
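
// generateImageTags returns a random number of tags, always more than
// maxNamesPerImageInNodeStatus, so the truncation of reported names can be exercised.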
func generateImageTags() []string {
	var tagList []string
	// Generate > maxNamesPerImageInNodeStatus tags so that the test can verify
	// that the kubelet reports at most maxNamesPerImageInNodeStatus tags.
	count := rand.IntnRange(maxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
	for ; count > 0; count-- {
		tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(count))
	}
	return tagList
}
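
// applyNodeStatusPatch applies a strategic merge patch (as produced by a node
// status update) to originalNode and returns the patched node.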
func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) {
	original, err := json.Marshal(originalNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal original node %#v: %v", originalNode, err)
	}
	updated, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
	if err != nil {
		return nil, fmt.Errorf("failed to apply strategic merge patch %q on node %#v: %v",
			patch, originalNode, err)
	}
	updatedNode := &v1.Node{}
	if err := json.Unmarshal(updated, updatedNode); err != nil {
		return nil, fmt.Errorf("failed to unmarshal updated node %q: %v", updated, err)
	}
	return updatedNode, nil
}
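
// localCM wraps a ContainerManager and overrides the allocatable reservation
// and capacity it reports with fixed values for these tests.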
type localCM struct {
	cm.ContainerManager
	allocatableReservation v1.ResourceList
	capacity               v1.ResourceList
}

func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
	return lcm.allocatableReservation
}

func (lcm *localCM) GetCapacity() v1.ResourceList {
	return lcm.capacity
}
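
// TestNodeStatusWithCloudProviderNodeIP verifies that setNodeAddress filters
// the addresses returned by the cloud provider against the configured node IP.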
func TestNodeStatusWithCloudProviderNodeIP(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.hostname = testKubeletHostname

	cases := []struct {
		name              string
		nodeIP            net.IP
		nodeAddresses     []v1.NodeAddress
		expectedAddresses []v1.NodeAddress
		shouldError       bool
	}{
		{
			name:   "A single InternalIP",
			nodeIP: net.ParseIP("10.1.1.1"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			name:   "NodeIP is external",
			nodeIP: net.ParseIP("55.55.55.55"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			// Accommodating #45201 and #49202
			name:   "InternalIP and ExternalIP are the same",
			nodeIP: net.ParseIP("55.55.55.55"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			name:   "An Internal/ExternalIP, an Internal/ExternalDNS",
			nodeIP: net.ParseIP("10.1.1.1"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeInternalDNS, Address: "ip-10-1-1-1.us-west-2.compute.internal"},
				{Type: v1.NodeExternalDNS, Address: "ec2-55-55-55-55.us-west-2.compute.amazonaws.com"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeInternalDNS, Address: "ip-10-1-1-1.us-west-2.compute.internal"},
				{Type: v1.NodeExternalDNS, Address: "ec2-55-55-55-55.us-west-2.compute.amazonaws.com"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			name:   "An Internal with multiple internal IPs",
			nodeIP: net.ParseIP("10.1.1.1"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeInternalIP, Address: "10.2.2.2"},
				{Type: v1.NodeInternalIP, Address: "10.3.3.3"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			name:   "An InternalIP that isn't valid: should error",
			nodeIP: net.ParseIP("10.2.2.2"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: nil,
			shouldError:       true,
		},
	}
	for _, testCase := range cases {
		// testCase setup
		existingNode := v1.Node{
			ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)},
			Spec:       v1.NodeSpec{},
		}

		kubelet.nodeIP = testCase.nodeIP

		fakeCloud := &fakecloud.FakeCloud{
			Addresses: testCase.nodeAddresses,
			Err:       nil,
		}
		kubelet.cloud = fakeCloud
		kubelet.cloudproviderRequestParallelism = make(chan int, 1)
		kubelet.cloudproviderRequestSync = make(chan int)
		kubelet.cloudproviderRequestTimeout = 10 * time.Second
		kubelet.nodeIPValidator = func(nodeIP net.IP) error {
			return nil
		}

		// execute method
		err := kubelet.setNodeAddress(&existingNode)
		if err != nil && !testCase.shouldError {
			t.Errorf("Unexpected error for test %s: %q", testCase.name, err)
			continue
		} else if err != nil && testCase.shouldError {
			// expected an error
			continue
		}

		// Sort both sets for consistent equality
		sortNodeAddresses(testCase.expectedAddresses)
		sortNodeAddresses(existingNode.Status.Addresses)

		assert.True(
			t,
			apiequality.Semantic.DeepEqual(
				testCase.expectedAddresses,
				existingNode.Status.Addresses,
			),
			fmt.Sprintf("Test %s failed %%s", testCase.name),
			diff.ObjectDiff(testCase.expectedAddresses, existingNode.Status.Addresses),
		)
	}
}

// sortableNodeAddress is a type for sorting []v1.NodeAddress
type sortableNodeAddress []v1.NodeAddress

func (s sortableNodeAddress) Len() int { return len(s) }
func (s sortableNodeAddress) Less(i, j int) bool {
	return (string(s[i].Type) + s[i].Address) < (string(s[j].Type) + s[j].Address)
}
func (s sortableNodeAddress) Swap(i, j int) { s[j], s[i] = s[i], s[j] }

func sortNodeAddresses(addrs sortableNodeAddress) {
	sort.Sort(addrs)
}
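
// TestUpdateNewNodeStatus verifies the node status reported for a freshly
// registered node, with and without a limit on the number of reported images.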
func TestUpdateNewNodeStatus(t *testing.T) {
	cases := []struct {
		desc                string
		nodeStatusMaxImages int32
	}{
		{
			desc:                "5 image limit",
			nodeStatusMaxImages: 5,
		},
		{
			desc:                "no image limit",
			nodeStatusMaxImages: -1,
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			// generate one more in inputImageList than we configure the Kubelet to report,
			// or 5 images if unlimited
			numTestImages := int(tc.nodeStatusMaxImages) + 1
			if tc.nodeStatusMaxImages == -1 {
				numTestImages = 5
			}
			inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
			testKubelet := newTestKubeletWithImageList(
				t, inputImageList, false /* controllerAttachDetachEnabled */, true /*initFakeVolumePlugin*/)
			defer testKubelet.Cleanup()
			kubelet := testKubelet.kubelet
			kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
			kubelet.kubeClient = nil // ensure only the heartbeat client is used
			kubelet.containerManager = &localCM{
				ContainerManager: cm.NewStubContainerManager(),
				allocatableReservation: v1.ResourceList{
					v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
					v1.ResourceMemory:           *resource.NewQuantity(100E6, resource.BinarySI),
					v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
				},
				capacity: v1.ResourceList{
					v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
					v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
					v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
				},
			}
			kubeClient := testKubelet.fakeKubeClient
			existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
			machineInfo := &cadvisorapi.MachineInfo{
				MachineID:      "123",
				SystemUUID:     "abc",
				BootID:         "1b3",
				NumCores:       2,
				MemoryCapacity: 10E9, // 10G
			}
			mockCadvisor := testKubelet.fakeCadvisor
			mockCadvisor.On("Start").Return(nil)
			mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
			versionInfo := &cadvisorapi.VersionInfo{
				KernelVersion:      "3.16.0-0.bpo.4-amd64",
				ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
			}
			mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
				Usage:     400,
				Capacity:  5000,
				Available: 600,
			}, nil)
			mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
				Usage:     400,
				Capacity:  5000,
				Available: 600,
			}, nil)
			mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
			maxAge := 0 * time.Second
			options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
			mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
			kubelet.machineInfo = machineInfo

			expectedNode := &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Spec:       v1.NodeSpec{},
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						{
							Type:               v1.NodeOutOfDisk,
							Status:             v1.ConditionFalse,
							Reason:             "KubeletHasSufficientDisk",
							Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
							LastHeartbeatTime:  metav1.Time{},
							LastTransitionTime: metav1.Time{},
						},
						{
							Type:               v1.NodeMemoryPressure,
							Status:             v1.ConditionFalse,
							Reason:             "KubeletHasSufficientMemory",
							Message:            fmt.Sprintf("kubelet has sufficient memory available"),
							LastHeartbeatTime:  metav1.Time{},
							LastTransitionTime: metav1.Time{},
						},
						{
							Type:               v1.NodeDiskPressure,
							Status:             v1.ConditionFalse,
							Reason:             "KubeletHasNoDiskPressure",
							Message:            fmt.Sprintf("kubelet has no disk pressure"),
							LastHeartbeatTime:  metav1.Time{},
							LastTransitionTime: metav1.Time{},
						},
						{
							Type:               v1.NodePIDPressure,
							Status:             v1.ConditionFalse,
							Reason:             "KubeletHasSufficientPID",
							Message:            fmt.Sprintf("kubelet has sufficient PID available"),
							LastHeartbeatTime:  metav1.Time{},
							LastTransitionTime: metav1.Time{},
						},
						{
							Type:               v1.NodeReady,
							Status:             v1.ConditionTrue,
							Reason:             "KubeletReady",
							Message:            fmt.Sprintf("kubelet is posting ready status"),
							LastHeartbeatTime:  metav1.Time{},
							LastTransitionTime: metav1.Time{},
						},
					},
					NodeInfo: v1.NodeSystemInfo{
						MachineID:               "123",
						SystemUUID:              "abc",
						BootID:                  "1b3",
						KernelVersion:           "3.16.0-0.bpo.4-amd64",
						OSImage:                 "Debian GNU/Linux 7 (wheezy)",
						OperatingSystem:         goruntime.GOOS,
						Architecture:            goruntime.GOARCH,
						ContainerRuntimeVersion: "test://1.5.0",
						KubeletVersion:          version.Get().String(),
						KubeProxyVersion:        version.Get().String(),
					},
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(9900E6, resource.BinarySI),
						v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
					},
					Addresses: []v1.NodeAddress{
						{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
						{Type: v1.NodeHostName, Address: testKubeletHostname},
					},
					Images: expectedImageList,
				},
			}

			kubelet.updateRuntimeUp()
			assert.NoError(t, kubelet.updateNodeStatus())
			actions := kubeClient.Actions()
			require.Len(t, actions, 2)
			require.True(t, actions[1].Matches("patch", "nodes"))
			require.Equal(t, actions[1].GetSubresource(), "status")

			updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
			assert.NoError(t, err)
			for i, cond := range updatedNode.Status.Conditions {
				assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
				assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
				updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
				updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
			}

			// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
			assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
				"NotReady should be last")
			assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
			assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
		})
	}
}
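
// TestUpdateExistingNodeStatus verifies that updating the status of an already
// registered node refreshes LastHeartbeatTime while preserving LastTransitionTime.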
func TestUpdateExistingNodeStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(20E9, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
		},
	}

	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodePIDPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientPID",
					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
			},
		},
	}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 20E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400,
		Capacity:  5000,
		Available: 600,
	}, nil)
	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400,
		Capacity:  5000,
		Available: 600,
	}, nil)
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
	maxAge := 0 * time.Second
	options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
	mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
	kubelet.machineInfo = machineInfo

	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodePIDPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientPID",
					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  metav1.Time{}, // placeholder
					LastTransitionTime: metav1.Time{}, // placeholder
				},
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			// images will be sorted from max to min in node status.
			Images: []v1.ContainerImage{
				{
					Names:     []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
					SizeBytes: 123,
				},
				{
					Names:     []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
					SizeBytes: 456,
				},
			},
		},
	}

	kubelet.updateRuntimeUp()
	assert.NoError(t, kubelet.updateNodeStatus())

	actions := kubeClient.Actions()
	assert.Len(t, actions, 2)

	assert.IsType(t, core.PatchActionImpl{}, actions[1])
	patchAction := actions[1].(core.PatchActionImpl)

	updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
	require.NoError(t, err)

	for i, cond := range updatedNode.Status.Conditions {
		old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
		// Expect LastHeartbeatTime to be updated to Now, while LastTransitionTime remains the same.
		assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
		assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)

		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
	}

	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
		"NodeReady should be the last condition")
	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
}
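
// TestUpdateExistingNodeStatusTimeout verifies that node status updates against an
// unresponsive API server time out, retry, and trigger the repeated-heartbeat-failure callback.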
func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
	attempts := int64(0)
	failureCallbacks := int64(0)

	// set up a listener that hangs connections
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	assert.NoError(t, err)
	defer ln.Close()
	go func() {
		// accept connections and just let them hang
		for {
			_, err := ln.Accept()
			if err != nil {
				t.Log(err)
				return
			}
			t.Log("accepted connection")
			atomic.AddInt64(&attempts, 1)
		}
	}()

	config := &rest.Config{
		Host:    "http://" + ln.Addr().String(),
		QPS:     -1,
		Timeout: time.Second,
	}
	assert.NoError(t, err)

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.heartbeatClient, err = v1core.NewForConfig(config)
	kubelet.onRepeatedHeartbeatFailure = func() {
		atomic.AddInt64(&failureCallbacks, 1)
	}
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
		},
	}

	// should return an error, but not hang
	assert.Error(t, kubelet.updateNodeStatus())

	// should have attempted multiple times
	if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
		t.Errorf("Expected at least %d attempts, got %d", nodeStatusUpdateRetry, actualAttempts)
	}
	// should have gotten multiple failure callbacks
	if actualFailureCallbacks := atomic.LoadInt64(&failureCallbacks); actualFailureCallbacks < (nodeStatusUpdateRetry - 1) {
		t.Errorf("Expected %d failure callbacks, got %d", (nodeStatusUpdateRetry - 1), actualFailureCallbacks)
	}
}
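
// TestUpdateNodeStatusWithRuntimeStateError verifies how the NodeReady condition
// reflects container runtime and network readiness as reported via updateRuntimeUp.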
func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(100E6, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
		},
	}

	clock := testKubelet.fakeClock
	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}

	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
	maxAge := 0 * time.Second
	options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
	mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    400,
		Capacity: 10E9,
	}, nil)
	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    400,
		Capacity: 20E9,
	}, nil)

	kubelet.machineInfo = machineInfo

	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeOutOfDisk,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasNoDiskPressure",
					Message:            fmt.Sprintf("kubelet has no disk pressure"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodePIDPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientPID",
					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{}, //placeholder
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(9900E6, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			Images: []v1.ContainerImage{
				{
					Names:     []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
					SizeBytes: 123,
				},
				{
					Names:     []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
					SizeBytes: 456,
				},
			},
		},
	}

	checkNodeStatus := func(status v1.ConditionStatus, reason string) {
		kubeClient.ClearActions()
		assert.NoError(t, kubelet.updateNodeStatus())
		actions := kubeClient.Actions()
		require.Len(t, actions, 2)
		require.True(t, actions[1].Matches("patch", "nodes"))
		require.Equal(t, actions[1].GetSubresource(), "status")

		updatedNode, err := kubeClient.CoreV1().Nodes().Get(testKubeletHostname, metav1.GetOptions{})
		require.NoError(t, err, "can't apply node status patch")

		for i, cond := range updatedNode.Status.Conditions {
			assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
			assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
			updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
			updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
		}

		// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
		lastIndex := len(updatedNode.Status.Conditions) - 1
		assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
		assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)

		updatedNode.Status.Conditions[lastIndex].Message = ""
		expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
			Type:               v1.NodeReady,
			Status:             status,
			Reason:             reason,
			LastHeartbeatTime:  metav1.Time{},
			LastTransitionTime: metav1.Time{},
		}
		assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
	}

	// TODO(random-liu): Refactor the unit test to be table driven test.
	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report kubelet ready if the runtime check is updated
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionTrue, "KubeletReady")

	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report kubelet not ready if the runtime check failed
	fakeRuntime := testKubelet.fakeRuntime
	// Inject error into fake runtime status check, node should be NotReady
	fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	fakeRuntime.StatusErr = nil

	// Should report node not ready if runtime status is nil.
	fakeRuntime.RuntimeStatus = nil
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if runtime status is empty.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if RuntimeReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: false},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node ready if RuntimeReady is true.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionTrue, "KubeletReady")

	// Should report node not ready if NetworkReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: false},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
}
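
// TestUpdateNodeStatusError verifies that updateNodeStatus retries and then fails
// when no matching node object exists on the API server.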
func TestUpdateNodeStatusError(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	// No matching node for the kubelet
	testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
	assert.Error(t, kubelet.updateNodeStatus())
	assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
}
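
// TestRegisterWithApiServer verifies that registerWithAPIServer completes,
// rather than retrying forever, when the node already exists on the API server.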
func TestRegisterWithApiServer(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		// Return an error on create.
		return true, &v1.Node{}, &apierrors.StatusError{
			ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
		}
	})
	kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		// Return an existing (matching) node on get.
		return true, &v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name: testKubeletHostname,
				Labels: map[string]string{
					kubeletapis.LabelHostname: testKubeletHostname,
					kubeletapis.LabelOS:       goruntime.GOOS,
					kubeletapis.LabelArch:     goruntime.GOARCH,
				},
			},
		}, nil
	})
	kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	})
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
		DockerVersion:      "1.5.0",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400,
		Capacity:  1000,
		Available: 600,
	}, nil)
	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9,
		Capacity: 10,
	}, nil)
	kubelet.machineInfo = machineInfo

	done := make(chan struct{})
	go func() {
		kubelet.registerWithAPIServer()
		done <- struct{}{}
	}()
	select {
	case <-time.After(wait.ForeverTestTimeout):
		assert.Fail(t, "timed out waiting for registration")
	case <-done:
		return
	}
}
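
// TestTryRegisterWithApiServer exercises the create/get/patch paths of node
// registration, including reconciling the controller-managed attach-detach
// annotation on an existing node.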
|
2016-08-30 17:40:25 +00:00
|
|
|
|
|
|
|
func TestTryRegisterWithApiServer(t *testing.T) {
	alreadyExists := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
	}

	conflict := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict},
	}

	newNode := func(cmad bool) *v1.Node {
		node := &v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{
					kubeletapis.LabelHostname: testKubeletHostname,
					kubeletapis.LabelOS:       goruntime.GOOS,
					kubeletapis.LabelArch:     goruntime.GOARCH,
				},
			},
		}

		if cmad {
			node.Annotations = make(map[string]string)
			node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
		}

		return node
	}

	cases := []struct {
		name            string
		newNode         *v1.Node
		existingNode    *v1.Node
		createError     error
		getError        error
		patchError      error
		deleteError     error
		expectedResult  bool
		expectedActions int
		testSavedNode   bool
		savedNodeIndex  int
		savedNodeCMAD   bool
	}{
		{
			name:            "success case - new node",
			newNode:         &v1.Node{},
			expectedResult:  true,
			expectedActions: 1,
		},
		{
			name:            "success case - existing node - no change in CMAD",
			newNode:         newNode(true),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			expectedResult:  true,
			expectedActions: 2,
		},
		{
			name:            "success case - existing node - CMAD disabled",
			newNode:         newNode(false),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   false,
		},
		{
			name:            "success case - existing node - CMAD enabled",
			newNode:         newNode(true),
			createError:     alreadyExists,
			existingNode:    newNode(false),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   true,
		},
		{
			name:            "create failed",
			newNode:         newNode(false),
			createError:     conflict,
			expectedResult:  false,
			expectedActions: 1,
		},
		{
			name:            "get existing node failed",
			newNode:         newNode(false),
			createError:     alreadyExists,
			getError:        conflict,
			expectedResult:  false,
			expectedActions: 2,
		},
		{
			name:            "update existing node failed",
			newNode:         newNode(false),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			patchError:      conflict,
			expectedResult:  false,
			expectedActions: 3,
		},
	}

	notImplemented := func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	}

	for _, tc := range cases {
		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
		defer testKubelet.Cleanup()
		kubelet := testKubelet.kubelet
		kubeClient := testKubelet.fakeKubeClient

		kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, nil, tc.createError
		})
		kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			// Return an existing (matching) node on get.
			return true, tc.existingNode, tc.getError
		})
		kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			if action.GetSubresource() == "status" {
				return true, nil, tc.patchError
			}
			return notImplemented(action)
		})
		kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, nil, tc.deleteError
		})
		kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
			return notImplemented(action)
		})

		result := kubelet.tryRegisterWithAPIServer(tc.newNode)
		require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)

		actions := kubeClient.Actions()
		assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name)

		if tc.testSavedNode {
			var savedNode *v1.Node

			t.Logf("actions: %v: %+v", len(actions), actions)
			action := actions[tc.savedNodeIndex]
			if action.GetVerb() == "create" {
				createAction := action.(core.CreateAction)
				obj := createAction.GetObject()
				require.IsType(t, &v1.Node{}, obj)
				savedNode = obj.(*v1.Node)
			} else if action.GetVerb() == "patch" {
				patchAction := action.(core.PatchActionImpl)
				var err error
				savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
				require.NoError(t, err)
			}

			actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
			assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
		}
	}
}

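// TestUpdateNewNodeStatusTooLargeReservation verifies that a resource reservation larger
// than the node's capacity (here 40 CPU cores reserved on a 2-core machine) is reported
// as zero allocatable rather than a negative quantity.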
func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
	const nodeStatusMaxImages = 5

	// generate one more in inputImageList than we configure the Kubelet to report
	inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
	testKubelet := newTestKubeletWithImageList(
		t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.nodeStatusMaxImages = nodeStatusMaxImages
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(40000, resource.DecimalSI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
		},
	}
	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9, // 10G
	}
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
	maxAge := 0 * time.Second
	options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
	mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400,
		Capacity:  3000,
		Available: 600,
	}, nil)
	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400,
		Capacity:  3000,
		Available: 600,
	}, nil)
	kubelet.machineInfo = machineInfo

	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(0, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
			},
		},
	}

	kubelet.updateRuntimeUp()
	assert.NoError(t, kubelet.updateNodeStatus())
	actions := kubeClient.Actions()
	require.Len(t, actions, 2)
	require.True(t, actions[1].Matches("patch", "nodes"))
	require.Equal(t, actions[1].GetSubresource(), "status")

	updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
	assert.NoError(t, err)
	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", diff.ObjectDiff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable))
}

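// TestUpdateDefaultLabels verifies that updateDefaultLabels reconciles the kubelet-managed
// default labels on the existing node, leaves labels it does not own untouched, and
// reports whether an update is needed.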
func TestUpdateDefaultLabels(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used

	cases := []struct {
		name         string
		initialNode  *v1.Node
		existingNode *v1.Node
		needsUpdate  bool
		finalLabels  map[string]string
	}{
		{
			name: "make sure default labels exist",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						kubeletapis.LabelHostname:          "new-hostname",
						kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
						kubeletapis.LabelZoneRegion:        "new-zone-region",
						kubeletapis.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:                "new-os",
						kubeletapis.LabelArch:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{},
				},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				kubeletapis.LabelHostname:          "new-hostname",
				kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
				kubeletapis.LabelZoneRegion:        "new-zone-region",
				kubeletapis.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:                "new-os",
				kubeletapis.LabelArch:              "new-arch",
			},
		},
		{
			name: "make sure default labels are up to date",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						kubeletapis.LabelHostname:          "new-hostname",
						kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
						kubeletapis.LabelZoneRegion:        "new-zone-region",
						kubeletapis.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:                "new-os",
						kubeletapis.LabelArch:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						kubeletapis.LabelHostname:          "old-hostname",
						kubeletapis.LabelZoneFailureDomain: "old-zone-failure-domain",
						kubeletapis.LabelZoneRegion:        "old-zone-region",
						kubeletapis.LabelInstanceType:      "old-instance-type",
						kubeletapis.LabelOS:                "old-os",
						kubeletapis.LabelArch:              "old-arch",
					},
				},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				kubeletapis.LabelHostname:          "new-hostname",
				kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
				kubeletapis.LabelZoneRegion:        "new-zone-region",
				kubeletapis.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:                "new-os",
				kubeletapis.LabelArch:              "new-arch",
			},
		},
		{
			name: "make sure existing labels do not get deleted",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						kubeletapis.LabelHostname:          "new-hostname",
						kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
						kubeletapis.LabelZoneRegion:        "new-zone-region",
						kubeletapis.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:                "new-os",
						kubeletapis.LabelArch:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						kubeletapis.LabelHostname:          "new-hostname",
						kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
						kubeletapis.LabelZoneRegion:        "new-zone-region",
						kubeletapis.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:                "new-os",
						kubeletapis.LabelArch:              "new-arch",
						"please-persist":                   "foo",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				kubeletapis.LabelHostname:          "new-hostname",
				kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
				kubeletapis.LabelZoneRegion:        "new-zone-region",
				kubeletapis.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:                "new-os",
				kubeletapis.LabelArch:              "new-arch",
				"please-persist":                   "foo",
			},
		},
		{
			name: "make sure existing labels do not get deleted when initial node has no opinion",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						kubeletapis.LabelHostname:          "new-hostname",
						kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
						kubeletapis.LabelZoneRegion:        "new-zone-region",
						kubeletapis.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:                "new-os",
						kubeletapis.LabelArch:              "new-arch",
						"please-persist":                   "foo",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				kubeletapis.LabelHostname:          "new-hostname",
				kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
				kubeletapis.LabelZoneRegion:        "new-zone-region",
				kubeletapis.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:                "new-os",
				kubeletapis.LabelArch:              "new-arch",
				"please-persist":                   "foo",
			},
		},
		{
			name: "no update needed",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						kubeletapis.LabelHostname:          "new-hostname",
						kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
						kubeletapis.LabelZoneRegion:        "new-zone-region",
						kubeletapis.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:                "new-os",
						kubeletapis.LabelArch:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						kubeletapis.LabelHostname:          "new-hostname",
						kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
						kubeletapis.LabelZoneRegion:        "new-zone-region",
						kubeletapis.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:                "new-os",
						kubeletapis.LabelArch:              "new-arch",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				kubeletapis.LabelHostname:          "new-hostname",
				kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain",
				kubeletapis.LabelZoneRegion:        "new-zone-region",
				kubeletapis.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:                "new-os",
				kubeletapis.LabelArch:              "new-arch",
			},
		},
	}

	for _, tc := range cases {
		defer testKubelet.Cleanup()
		kubelet := testKubelet.kubelet

		needsUpdate := kubelet.updateDefaultLabels(tc.initialNode, tc.existingNode)
		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
		assert.Equal(t, tc.finalLabels, tc.existingNode.Labels, tc.name)
	}
}

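// TestValidateNodeIPParam checks that validateNodeIP rejects loopback, link-local,
// multicast, unspecified, and non-local addresses, and accepts the host's own routable
// unicast interface addresses.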
func TestValidateNodeIPParam(t *testing.T) {
	type test struct {
		nodeIP   string
		success  bool
		testName string
	}
	tests := []test{
		{
			nodeIP:   "",
			success:  false,
			testName: "IP not set",
		},
		{
			nodeIP:   "127.0.0.1",
			success:  false,
			testName: "IPv4 loopback address",
		},
		{
			nodeIP:   "::1",
			success:  false,
			testName: "IPv6 loopback address",
		},
		{
			nodeIP:   "224.0.0.1",
			success:  false,
			testName: "multicast IPv4 address",
		},
		{
			nodeIP:   "ff00::1",
			success:  false,
			testName: "multicast IPv6 address",
		},
		{
			nodeIP:   "169.254.0.1",
			success:  false,
			testName: "IPv4 link-local unicast address",
		},
		{
			nodeIP:   "fe80::0202:b3ff:fe1e:8329",
			success:  false,
			testName: "IPv6 link-local unicast address",
		},
		{
			nodeIP:   "0.0.0.0",
			success:  false,
			testName: "Unspecified IPv4 address",
		},
		{
			nodeIP:   "::",
			success:  false,
			testName: "Unspecified IPv6 address",
		},
		{
			nodeIP:   "1.2.3.4",
			success:  false,
			testName: "IPv4 address that doesn't belong to host",
		},
	}
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		assert.Error(t, err, fmt.Sprintf(
			"Unable to obtain a list of the node's unicast interface addresses."))
	}
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		}
		if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
			// Skip addresses that validateNodeIP is expected to reject; only the
			// remaining routable unicast addresses become success cases.
			continue
		}
		successTest := test{
			nodeIP:   ip.String(),
			success:  true,
			testName: fmt.Sprintf("Success test case for address %s", ip.String()),
		}
		tests = append(tests, successTest)
	}
	for _, test := range tests {
		err := validateNodeIP(net.ParseIP(test.nodeIP))
		if test.success {
			assert.NoError(t, err, "test %s", test.testName)
		} else {
			assert.Error(t, err, fmt.Sprintf("test %s", test.testName))
		}
	}
}

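// TestSetVolumeLimits verifies that setVolumeLimits publishes the per-cloud-provider
// attachable volume limit (GCE PD, AWS EBS, Azure Disk) under both node capacity and
// allocatable.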
func TestSetVolumeLimits(t *testing.T) {
	testKubelet := newTestKubeletWithoutFakeVolumePlugin(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.hostname = testKubeletHostname

	var testcases = []struct {
		name              string
		cloudProviderName string
		expectedVolumeKey string
		expectedLimit     int64
	}{
		{
			name:              "For default GCE cloudprovider",
			cloudProviderName: "gce",
			expectedVolumeKey: util.GCEVolumeLimitKey,
			expectedLimit:     16,
		},
		{
			name:              "For default AWS cloudprovider",
			cloudProviderName: "aws",
			expectedVolumeKey: util.EBSVolumeLimitKey,
			expectedLimit:     39,
		},
		{
			name:              "For default Azure cloudprovider",
			cloudProviderName: "azure",
			expectedVolumeKey: util.AzureVolumeLimitKey,
			expectedLimit:     16,
		},
	}
	for _, test := range testcases {
		node := &v1.Node{
			ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)},
			Spec:       v1.NodeSpec{},
		}

		fakeCloud := &fakecloud.FakeCloud{
			Provider: test.cloudProviderName,
			Err:      nil,
		}
		kubelet.cloud = fakeCloud
		kubelet.cloudproviderRequestParallelism = make(chan int, 1)
		kubelet.cloudproviderRequestSync = make(chan int)
		kubelet.cloudproviderRequestTimeout = 10 * time.Second
		kubelet.setVolumeLimits(node)
		nodeLimits := []v1.ResourceList{}
		nodeLimits = append(nodeLimits, node.Status.Allocatable)
		nodeLimits = append(nodeLimits, node.Status.Capacity)
		for _, volumeLimits := range nodeLimits {
			fl, ok := volumeLimits[v1.ResourceName(test.expectedVolumeKey)]
			if !ok {
				t.Errorf("Expected to find volume limit for %s, found none", test.expectedVolumeKey)
			}
			foundLimit, _ := fl.AsInt64()
			expectedValue := resource.NewQuantity(test.expectedLimit, resource.DecimalSI)
			if expectedValue.Cmp(fl) != 0 {
				t.Errorf("Expected volume limit for %s to be %v, found %v", test.expectedVolumeKey, test.expectedLimit, foundLimit)
			}
		}

	}
}