Use the assert/require package in kubelet unit tests

This reduces the lines of code and improves readability.
Yu-Ju Hong 2017-03-06 12:30:31 -08:00
parent 61e7d1ebf1
commit b1e6e7f774
6 changed files with 147 additions and 343 deletions
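The pattern is the same across all six files: hand-rolled if/t.Errorf (or t.Fatalf) stanzas collapse into single testify calls. A minimal before/after sketch of the conversion, assuming a hypothetical doSomething helper that is not part of this commit:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// doSomething is a hypothetical helper standing in for any function under test.
func doSomething() (*string, error) {
	s := "ok"
	return &s, nil
}

// Before: one if-stanza per check.
func TestBefore(t *testing.T) {
	result, err := doSomething()
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if result == nil {
		t.Fatalf("result should not be nil")
	}
	if *result != "ok" {
		t.Errorf("expected %q, got %q", "ok", *result)
	}
}

// After: the same checks as one-liners; the expected value always comes first.
func TestAfter(t *testing.T) {
	result, err := doSomething()
	assert.NoError(t, err)         // records the failure and keeps running
	require.NotNil(t, result)      // aborts the test, so the dereference below is safe
	assert.Equal(t, "ok", *result)
}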

View File

@@ -20,6 +20,9 @@ import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/apimachinery/pkg/types"
@@ -194,11 +197,10 @@ func TestGetContainerInfo(t *testing.T) {
fakeRuntime.PodList = tc.podList
stats, err := kubelet.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUid, tc.requestedContainerName, cadvisorReq)
if err != tc.expectedError {
t.Errorf("test '%s' failed: expected error %#v, got %#v", tc.name, tc.expectedError, err)
}
if tc.expectStats && stats == nil {
t.Fatalf("test '%s' failed: stats should not be nil", tc.name)
assert.Equal(t, tc.expectedError, err)
if tc.expectStats {
require.NotNil(t, stats)
}
mockCadvisor.AssertExpectations(t)
}
@@ -219,9 +221,7 @@ func TestGetRawContainerInfoRoot(t *testing.T) {
mockCadvisor.On("ContainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
_, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
assert.NoError(t, err)
mockCadvisor.AssertExpectations(t)
}
@@ -247,12 +247,8 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) {
mockCadvisor.On("SubcontainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
result, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, true)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(result) != 2 {
t.Errorf("Expected 2 elements, received: %#v", result)
}
assert.NoError(t, err)
assert.Len(t, result, 2)
mockCadvisor.AssertExpectations(t)
}
@@ -282,11 +278,7 @@ func TestHasDedicatedImageFs(t *testing.T) {
mockCadvisor.On("ImagesFsInfo").Return(testCase.imageFsInfo, nil)
mockCadvisor.On("RootFsInfo").Return(testCase.rootFsInfo, nil)
actual, err := kubelet.HasDedicatedImageFs()
if err != nil {
t.Errorf("case: %s, unexpected error: %v", testName, err)
}
if actual != testCase.expected {
t.Errorf("case: %s, expected: %v, actual: %v", testName, testCase.expected, actual)
}
assert.NoError(t, err, "test [%s]", testName)
assert.Equal(t, testCase.expected, actual, "test [%s]", testName)
}
}
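The split between the two packages in hunks like those above is deliberate: assert.* records a failure and lets the test continue, while require.* calls t.FailNow and aborts, which is why stats is guarded with require.NotNil before anything could dereference it. A sketch of the distinction, assuming an invented getStats helper:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type containerStats struct{ usageBytes int }

// getStats is a hypothetical stand-in for GetContainerInfo; it is contrived
// to return both a value and an error so every assertion below is exercised.
func getStats() (*containerStats, error) {
	return &containerStats{usageBytes: 42}, errors.New("transient")
}

func TestStats(t *testing.T) {
	stats, err := getStats()
	assert.Error(t, err)     // a failure here would be recorded; the test keeps going
	require.NotNil(t, stats) // a failure here calls t.FailNow, so...
	assert.Equal(t, 42, stats.usageBytes) // ...this line never sees a nil pointer
}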

View File

@@ -21,6 +21,9 @@ import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestKubeletDirs(t *testing.T) {
@@ -33,57 +36,39 @@ func TestKubeletDirs(t *testing.T) {
got = kubelet.getPodsDir()
exp = filepath.Join(root, "pods")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, exp, got)
got = kubelet.getPluginsDir()
exp = filepath.Join(root, "plugins")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, exp, got)
got = kubelet.getPluginDir("foobar")
exp = filepath.Join(root, "plugins/foobar")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, exp, got)
got = kubelet.getPodDir("abc123")
exp = filepath.Join(root, "pods/abc123")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, exp, got)
got = kubelet.getPodVolumesDir("abc123")
exp = filepath.Join(root, "pods/abc123/volumes")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, exp, got)
got = kubelet.getPodVolumeDir("abc123", "plugin", "foobar")
exp = filepath.Join(root, "pods/abc123/volumes/plugin/foobar")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, exp, got)
got = kubelet.getPodPluginsDir("abc123")
exp = filepath.Join(root, "pods/abc123/plugins")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, exp, got)
got = kubelet.getPodPluginDir("abc123", "foobar")
exp = filepath.Join(root, "pods/abc123/plugins/foobar")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, exp, got)
got = kubelet.getPodContainerDir("abc123", "def456")
exp = filepath.Join(root, "pods/abc123/containers/def456")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, exp, got)
}
func TestKubeletDirsCompat(t *testing.T) {
@@ -91,91 +76,37 @@ func TestKubeletDirsCompat(t *testing.T) {
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
root := kubelet.rootDirectory
if err := os.MkdirAll(root, 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
var exp, got string
require.NoError(t, os.MkdirAll(root, 0750), "can't mkdir(%q)", root)
// Old-style pod dir.
if err := os.MkdirAll(fmt.Sprintf("%s/oldpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/oldpod", root), 0750), "can't mkdir(%q)", root)
// New-style pod dir.
if err := os.MkdirAll(fmt.Sprintf("%s/pods/newpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/pods/newpod", root), 0750), "can't mkdir(%q)", root)
// Both-style pod dir.
if err := os.MkdirAll(fmt.Sprintf("%s/bothpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
if err := os.MkdirAll(fmt.Sprintf("%s/pods/bothpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/bothpod", root), 0750), "can't mkdir(%q)", root)
require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/pods/bothpod", root), 0750), "can't mkdir(%q)", root)
got = kubelet.getPodDir("oldpod")
exp = filepath.Join(root, "oldpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("newpod")
exp = filepath.Join(root, "pods/newpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("bothpod")
exp = filepath.Join(root, "pods/bothpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("neitherpod")
exp = filepath.Join(root, "pods/neitherpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, filepath.Join(root, "oldpod"), kubelet.getPodDir("oldpod"))
assert.Equal(t, filepath.Join(root, "pods/newpod"), kubelet.getPodDir("newpod"))
assert.Equal(t, filepath.Join(root, "pods/bothpod"), kubelet.getPodDir("bothpod"))
assert.Equal(t, filepath.Join(root, "pods/neitherpod"), kubelet.getPodDir("neitherpod"))
root = kubelet.getPodDir("newpod")
// Old-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/oldctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/oldctr", root), 0750), "can't mkdir(%q)", root)
// New-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/containers/newctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/containers/newctr", root), 0750), "can't mkdir(%q)", root)
// Both-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/bothctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
if err := os.MkdirAll(fmt.Sprintf("%s/containers/bothctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/bothctr", root), 0750), "can't mkdir(%q)", root)
require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/containers/bothctr", root), 0750), "can't mkdir(%q)", root)
got = kubelet.getPodContainerDir("newpod", "oldctr")
exp = filepath.Join(root, "oldctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("newpod", "newctr")
exp = filepath.Join(root, "containers/newctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("newpod", "bothctr")
exp = filepath.Join(root, "containers/bothctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("newpod", "neitherctr")
exp = filepath.Join(root, "containers/neitherctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
assert.Equal(t, filepath.Join(root, "oldctr"), kubelet.getPodContainerDir("newpod", "oldctr"))
assert.Equal(t, filepath.Join(root, "containers/newctr"), kubelet.getPodContainerDir("newpod", "newctr"))
assert.Equal(t, filepath.Join(root, "containers/bothctr"), kubelet.getPodContainerDir("newpod", "bothctr"))
assert.Equal(t, filepath.Join(root, "containers/neitherctr"), kubelet.getPodContainerDir("newpod", "neitherctr"))
}
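Many of the table-driven conversions above pass extra arguments such as "can't mkdir(%q)", root. Every testify assertion takes a trailing msgAndArgs ...interface{}; the first extra argument is treated as a printf-style format and the rest as its operands, appended to the failure output so the failing case is identifiable. A small sketch of that usage, with invented table contents:

package example

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestDirsTable(t *testing.T) {
	root := "/var/lib/kubelet"
	testCases := []struct {
		name, sub, exp string
	}{
		{"pods dir", "pods", "/var/lib/kubelet/pods"},
		{"plugins dir", "plugins", "/var/lib/kubelet/plugins"},
	}
	for i, tc := range testCases {
		got := filepath.Join(root, tc.sub)
		// The trailing format string and args are appended to testify's
		// own expected/actual diff when the assertion fails.
		assert.Equal(t, tc.exp, got, "test case [%d]: %s", i, tc.name)
	}
}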

View File

@@ -19,10 +19,11 @@ package kubelet
import (
"fmt"
"net"
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/bandwidth"
@@ -61,10 +62,10 @@ func TestNodeIPParam(t *testing.T) {
for _, test := range tests {
kubelet.nodeIP = net.ParseIP(test.nodeIP)
err := kubelet.validateNodeIP()
if err != nil && test.success {
t.Errorf("Test: %s, expected no error but got: %v", test.testName, err)
} else if err == nil && !test.success {
t.Errorf("Test: %s, expected an error", test.testName)
if test.success {
assert.NoError(t, err, "test %s", test.testName)
} else {
assert.Error(t, err, fmt.Sprintf("test %s", test.testName))
}
}
}
@@ -103,16 +104,9 @@ func TestParseResolvConf(t *testing.T) {
kubelet := testKubelet.kubelet
for i, tc := range testCases {
ns, srch, err := kubelet.parseResolvConf(strings.NewReader(tc.data))
if err != nil {
t.Errorf("expected success, got %v", err)
continue
}
if !reflect.DeepEqual(ns, tc.nameservers) {
t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
}
if !reflect.DeepEqual(srch, tc.searches) {
t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
}
require.NoError(t, err)
assert.EqualValues(t, tc.nameservers, ns, "test case [%d]: name servers", i)
assert.EqualValues(t, tc.searches, srch, "test case [%d] searches", i)
}
}
@@ -179,17 +173,11 @@ func TestComposeDNSSearch(t *testing.T) {
for i, tc := range testCases {
dnsSearch := kubelet.formDNSSearch(tc.hostNames, pod)
if !reflect.DeepEqual(dnsSearch, tc.resultSearch) {
t.Errorf("[%d] expected search line %#v, got %#v", i, tc.resultSearch, dnsSearch)
}
assert.EqualValues(t, tc.resultSearch, dnsSearch, "test [%d]", i)
for _, expectedEvent := range tc.events {
expected := fmt.Sprintf("%s %s %s", v1.EventTypeWarning, "DNSSearchForming", expectedEvent)
event := fetchEvent(recorder)
if event != expected {
t.Errorf("[%d] expected event '%s', got '%s", i, expected, event)
}
assert.Equal(t, expected, event, "test [%d]", i)
}
}
}
@@ -268,12 +256,8 @@ func TestCleanupBandwidthLimits(t *testing.T) {
}
err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
if err != nil {
t.Errorf("unexpected error: %v (%s)", test.name, err)
}
if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) {
t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs)
}
assert.NoError(t, err, "test [%s]", test.name)
assert.EqualValues(t, test.expectResetCIDRs, shaper.ResetCIDRs, "test[%s]", test.name)
}
}
@@ -293,8 +277,6 @@ func TestGetIPTablesMark(t *testing.T) {
}
for _, tc := range tests {
res := getIPTablesMark(tc.bit)
if res != tc.expect {
t.Errorf("getIPTablesMark output unexpected result: %v when input bit is %d. Expect result: %v", res, tc.bit, tc.expect)
}
assert.Equal(t, tc.expect, res, "input %d", tc.bit)
}
}
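The reflect.DeepEqual checks in this file become assert.EqualValues rather than assert.Equal. EqualValues first tries ordinary equality and then, if the static types merely differ but are convertible, converts and compares again, so a named type and its underlying type still match. A sketch of the difference, using an invented cidrList type:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// cidrList is an invented named type with underlying type []string.
type cidrList []string

func TestEqualValues(t *testing.T) {
	expected := []string{"10.0.0.1/32", "10.0.0.2/32"}
	actual := cidrList{"10.0.0.1/32", "10.0.0.2/32"}

	// assert.Equal would fail: reflect.DeepEqual treats []string and
	// cidrList as different types even with identical elements.
	assert.EqualValues(t, expected, actual) // converts, then compares: passes
}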

View File

@@ -19,13 +19,15 @@ package kubelet
import (
"encoding/json"
"fmt"
"reflect"
goruntime "runtime"
"sort"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
apiequality "k8s.io/apimachinery/pkg/api/equality"
@@ -153,9 +155,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Make kubelet report that it has sufficient disk space.
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
require.NoError(t, updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100))
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -227,44 +227,25 @@ }
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
assert.NoError(t, kubelet.updateNodeStatus())
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("patch", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
require.Equal(t, actions[1].GetSubresource(), "status")
updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
if err != nil {
t.Fatalf("can't apply node status patch: %v", err)
}
assert.NoError(t, err)
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
}
if cond.LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
}
assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if maxImagesInNodeStatus != len(updatedNode.Status.Images) {
t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images))
} else {
if !apiequality.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
}
}
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type, "NotReady should be last")
assert.Len(t, updatedNode.Status.Images, maxImagesInNodeStatus)
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
}
func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
@@ -291,9 +272,8 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Make Kubelet report that it has sufficient disk space.
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100)
require.NoError(t, err, "update the disk space manager")
kubelet.outOfDiskTransitionFrequency = 10 * time.Second
@@ -307,40 +287,27 @@ }
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
assert.NoError(t, kubelet.updateNodeStatus())
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
// StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error)
if !actions[1].Matches("patch", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
require.Equal(t, "status", actions[1].GetSubresource())
updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
if err != nil {
t.Fatalf("can't apply node status patch: %v", err)
}
assert.NoError(t, err, "apply the node status patch")
var oodCondition v1.NodeCondition
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
}
if cond.LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
}
assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
if cond.Type == v1.NodeOutOfDisk {
oodCondition = updatedNode.Status.Conditions[i]
}
}
if !reflect.DeepEqual(expectedNodeOutOfDiskCondition, oodCondition) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNodeOutOfDiskCondition, oodCondition))
}
assert.EqualValues(t, expectedNodeOutOfDiskCondition, oodCondition)
}
func TestUpdateExistingNodeStatus(t *testing.T) {
@@ -424,9 +391,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Make kubelet report that it is out of disk space.
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100)
require.NoError(t, err, "update the disk space manager")
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -508,41 +474,31 @@ }
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
assert.NoError(t, kubelet.updateNodeStatus())
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Errorf("unexpected actions: %v", actions)
}
patchAction, ok := actions[1].(core.PatchActionImpl)
if !ok {
t.Errorf("unexpected action type. expected PatchActionImpl, got %#v", actions[1])
}
assert.Len(t, actions, 2)
assert.IsType(t, core.PatchActionImpl{}, actions[1])
patchAction := actions[1].(core.PatchActionImpl)
updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
if !ok {
t.Fatalf("can't apply node status patch: %v", err)
}
require.NoError(t, err)
for i, cond := range updatedNode.Status.Conditions {
// Expect LastProbeTime to be updated to Now, while LastTransitionTime to be the same.
if old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) {
t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, metav1.Now(), old)
}
if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) {
t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got)
}
old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
// Expect LastHeartbeatTime to be updated to Now, while LastTransitionTime remains the same.
assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)
updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if !apiequality.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
}
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
"NodeReady should be the last condition")
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
}
func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
@@ -670,25 +626,18 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
}
// Make kubelet report that it has sufficient disk space
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100)
require.NoError(t, err, "can't update disk space manager")
assert.NoError(t, kubelet.updateNodeStatus())
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Errorf("%d. unexpected actions: %v", tcIdx, actions)
}
patchAction, ok := actions[1].(core.PatchActionImpl)
if !ok {
t.Errorf("%d. unexpected action type. expected PatchActionImpl, got %#v", tcIdx, actions[1])
}
assert.Len(t, actions, 2, "test [%d]", tcIdx)
assert.IsType(t, core.PatchActionImpl{}, actions[1])
patchAction := actions[1].(core.PatchActionImpl)
updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
if err != nil {
t.Fatalf("can't apply node status patch: %v", err)
}
require.NoError(t, err, "can't apply node status patch")
kubeClient.ClearActions()
var oodCondition v1.NodeCondition
@@ -697,10 +646,7 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
oodCondition = updatedNode.Status.Conditions[i]
}
}
if !reflect.DeepEqual(tc.expected, oodCondition) {
t.Errorf("%d.\nunexpected objects: %s", tcIdx, diff.ObjectDiff(tc.expected, oodCondition))
}
assert.EqualValues(t, tc.expected, oodCondition)
}
}
@@ -737,9 +683,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Make kubelet report that it has sufficient disk space.
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
require.NoError(t, updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100))
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -814,40 +758,28 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
checkNodeStatus := func(status v1.ConditionStatus, reason string) {
kubeClient.ClearActions()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
assert.NoError(t, kubelet.updateNodeStatus())
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("patch", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
require.Equal(t, actions[1].GetSubresource(), "status")
updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
if err != nil {
t.Fatalf("can't apply node status patch: %v", err)
}
require.NoError(t, err, "can't apply node status patch")
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp")
}
if cond.LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp")
}
assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
lastIndex := len(updatedNode.Status.Conditions) - 1
if updatedNode.Status.Conditions[lastIndex].Type != v1.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if updatedNode.Status.Conditions[lastIndex].Message == "" {
t.Errorf("unexpected empty condition message")
}
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)
updatedNode.Status.Conditions[lastIndex].Message = ""
expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
Type: v1.NodeReady,
@@ -856,9 +788,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
}
if !apiequality.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
}
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
}
// TODO(random-liu): Refactor the unit test to be table driven test.
@@ -936,13 +866,8 @@ func TestUpdateNodeStatusError(t *testing.T) {
kubelet := testKubelet.kubelet
// No matching node for the kubelet
testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
if err := kubelet.updateNodeStatus(); err == nil {
t.Errorf("unexpected non error: %v", err)
}
if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
}
assert.Error(t, kubelet.updateNodeStatus())
assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
}
func TestRegisterWithApiServer(t *testing.T) {
@@ -998,7 +923,7 @@ func TestRegisterWithApiServer(t *testing.T) {
}()
select {
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("timed out waiting for registration")
assert.Fail(t, "timed out waiting for registration")
case <-done:
return
}
@@ -1153,43 +1078,30 @@ func TestTryRegisterWithApiServer(t *testing.T) {
})
result := kubelet.tryRegisterWithApiServer(tc.newNode)
if e, a := tc.expectedResult, result; e != a {
t.Errorf("%v: unexpected result; expected %v got %v", tc.name, e, a)
continue
}
require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)
actions := kubeClient.Actions()
if e, a := tc.expectedActions, len(actions); e != a {
t.Errorf("%v: unexpected number of actions, expected %v, got %v", tc.name, e, a)
}
assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name)
if tc.testSavedNode {
var savedNode *v1.Node
var ok bool
t.Logf("actions: %v: %+v", len(actions), actions)
action := actions[tc.savedNodeIndex]
if action.GetVerb() == "create" {
createAction := action.(core.CreateAction)
savedNode, ok = createAction.GetObject().(*v1.Node)
if !ok {
t.Errorf("%v: unexpected type; couldn't convert to *v1.Node: %+v", tc.name, createAction.GetObject())
continue
}
obj := createAction.GetObject()
require.IsType(t, &v1.Node{}, obj)
savedNode = obj.(*v1.Node)
} else if action.GetVerb() == "patch" {
patchAction := action.(core.PatchActionImpl)
var err error
savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
if err != nil {
t.Errorf("can't apply node status patch: %v", err)
continue
}
require.NoError(t, err)
}
actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[volumehelper.ControllerManagedAttachAnnotation])
if e, a := tc.savedNodeCMAD, actualCMAD; e != a {
t.Errorf("%v: unexpected attach-detach value on saved node; expected %v got %v", tc.name, e, a)
}
assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
}
}
}
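The comma-ok type assertions with manual t.Errorf in the hunk above become assert.IsType followed by an unchecked assertion. IsType compares the dynamic types of its two arguments; note that with assert (rather than require) a mismatch is only recorded, so the plain type assertion on the next line would still panic, which the test runner also reports as a failure. A hedged sketch, with patchAction as an invented stand-in for core.PatchActionImpl:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// patchAction is a hypothetical stand-in for core.PatchActionImpl.
type patchAction struct{ patch []byte }

func TestIsType(t *testing.T) {
	var action interface{} = patchAction{patch: []byte(`{"spec":{}}`)}

	// IsType only inspects the dynamic type of its arguments, so an
	// empty patchAction{} works as the expected-type witness.
	assert.IsType(t, patchAction{}, action)
	pa := action.(patchAction) // safe here because the types really match
	assert.NotEmpty(t, pa.patch)
}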

View File

@@ -68,19 +68,13 @@ func TestListVolumesForPod(t *testing.T) {
podName := volumehelper.GetUniquePodName(pod)
volumesToReturn, volumeExsit := kubelet.ListVolumesForPod(types.UID(podName))
if !volumeExsit {
t.Errorf("Expected to find volumes for pod %q, but ListVolumesForPod find no volume", podName)
}
assert.True(t, volumeExsit, "expected to find volumes for pod %q", podName)
outerVolumeSpecName1 := "vol1"
if volumesToReturn[outerVolumeSpecName1] == nil {
t.Errorf("Value of map volumesToReturn is not expected to be nil, which key is : %s", outerVolumeSpecName1)
}
assert.NotNil(t, volumesToReturn[outerVolumeSpecName1], "key %s", outerVolumeSpecName1)
outerVolumeSpecName2 := "vol2"
if volumesToReturn[outerVolumeSpecName2] == nil {
t.Errorf("Value of map volumesToReturn is not expected to be nil, which key is : %s", outerVolumeSpecName2)
}
assert.NotNil(t, volumesToReturn[outerVolumeSpecName2], "key %s", outerVolumeSpecName2)
}
@@ -154,18 +148,12 @@ func TestPodVolumesExist(t *testing.T) {
kubelet.podManager.SetPods(pods)
for _, pod := range pods {
err := kubelet.volumeManager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
assert.NoError(t, err)
}
for _, pod := range pods {
podVolumesExist := kubelet.podVolumesExist(pod.UID)
if !podVolumesExist {
t.Errorf(
"Expected to find volumes for pod %q, but podVolumesExist returned false",
pod.UID)
}
assert.True(t, podVolumesExist, "pod %q", pod.UID)
}
}

View File

@@ -19,6 +19,8 @@ package kubelet
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/record"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
@@ -29,10 +31,7 @@ func TestBasic(t *testing.T) {
mockCadvisor := &cadvisortest.Fake{}
node := &v1.ObjectReference{}
oomWatcher := NewOOMWatcher(mockCadvisor, fakeRecorder)
err := oomWatcher.Start(node)
if err != nil {
t.Errorf("Should not have failed: %v", err)
}
assert.NoError(t, oomWatcher.Start(node))
// TODO: Improve this test once cadvisor exports events.EventChannel as an interface
// and thereby allow using a mock version of cadvisor.