Merge pull request #37370 from gmarek/test-refactor

Automatic merge from submit-queue

Make NodeController test utils usable from outside

Required to fix tests for #37365 and useful in general.
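
Once these helpers are exported, a test outside the node package can build its fixtures from the testutil package directly. A minimal sketch of such a consumer (the import paths and testutil identifiers are taken from this diff; the consumer package and test function are illustrative only):

package consumer_test

import (
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
	"k8s.io/kubernetes/pkg/controller/node/testutil"
)

func TestWithFakeNodeHandler(t *testing.T) {
	// Seed the fake handler with one Node and a Pod bound to it.
	fnh := &testutil.FakeNodeHandler{
		Existing:  []*v1.Node{testutil.NewNode("node0")},
		Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
	}
	// The formerly package-private accessor is now exported.
	if n := len(fnh.GetUpdatedNodesCopy()); n != 0 {
		t.Fatalf("expected no node updates yet, got %d", n)
	}
	// GetZones derives zone names from the seeded Nodes' labels.
	_ = testutil.GetZones(fnh)
}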
Kubernetes Submit Queue 2016-12-05 06:38:32 -08:00 committed by GitHub
commit 4a91faa1b6
5 changed files with 163 additions and 115 deletions

View File

@@ -20,18 +20,15 @@ go_library(
"metrics.go",
"nodecontroller.go",
"rate_limited_queue.go",
"test_utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/cloudprovider:go_default_library",
@@ -39,9 +36,7 @@ go_library(
"//pkg/fields:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/metrics:go_default_library",
@@ -51,7 +46,6 @@ go_library(
"//pkg/util/system:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/version:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/prometheus/client_golang/prometheus",
],
@@ -81,6 +75,7 @@ go_test(
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/controller/node/testutil:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/diff:go_default_library",
"//pkg/util/flowcontrol:go_default_library",

View File

@@ -23,6 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/controller/node/testutil"
"k8s.io/kubernetes/pkg/util/wait"
)
@@ -30,9 +31,9 @@ const (
nodePollInterval = 100 * time.Millisecond
)
func waitForUpdatedNodeWithTimeout(nodeHandler *FakeNodeHandler, number int, timeout time.Duration) error {
func waitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number int, timeout time.Duration) error {
return wait.Poll(nodePollInterval, timeout, func() (bool, error) {
if len(nodeHandler.getUpdatedNodesCopy()) >= number {
if len(nodeHandler.GetUpdatedNodesCopy()) >= number {
return true, nil
}
return false, nil
@@ -42,7 +43,7 @@ func waitForUpdatedNodeWithTimeout(nodeHandler *FakeNodeHandler, number int, tim
func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
@@ -51,7 +52,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
}{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -71,7 +72,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
{
description: "Correctly filter out ServiceCIDR",
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -95,7 +96,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
{
description: "Correctly ignore already allocated CIDRs",
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -121,7 +122,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
testFunc := func(tc struct {
description string
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
@@ -152,7 +153,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
}
found := false
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.getUpdatedNodesCopy() {
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDR {
found = true
@@ -173,7 +174,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
@@ -181,7 +182,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
}{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -203,7 +204,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
testFunc := func(tc struct {
description string
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
@@ -231,11 +232,11 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
}
// We don't expect any updates, so just sleep for some time
time.Sleep(time.Second)
if len(tc.fakeNodeHandler.getUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.getUpdatedNodesCopy())
if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy())
}
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.getUpdatedNodesCopy() {
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
if updatedNode.Spec.PodCIDR != "" {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
}
@@ -253,7 +254,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
func TestReleaseCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
@@ -264,7 +265,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
}{
{
description: "Correctly release preallocated CIDR",
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -287,7 +288,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
},
{
description: "Correctly recycle CIDR",
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -311,7 +312,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
testFunc := func(tc struct {
description string
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
@@ -351,8 +352,8 @@ func TestReleaseCIDRSuccess(t *testing.T) {
}
// We don't expect any updates here
time.Sleep(time.Second)
if len(tc.fakeNodeHandler.getUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.getUpdatedNodesCopy())
if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy())
}
}
@@ -378,7 +379,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
found := false
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.getUpdatedNodesCopy() {
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDRSecondRound {
found = true

View File

@@ -35,6 +35,7 @@ import (
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/node/testutil"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/node"
@@ -98,7 +99,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
}
table := []struct {
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
daemonSets []extensions.DaemonSet
timeToPass time.Duration
newNodeStatus v1.NodeStatus
@@ -108,7 +109,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
}{
// Node created recently, with no status (happens only at cluster startup).
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -141,7 +142,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: 0,
@@ -152,7 +153,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
// Node created long time ago, and kubelet posted NotReady for a short period of time.
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -195,7 +196,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: evictionTimeout,
@@ -216,7 +217,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
// Pod is ds-managed, and kubelet posted NotReady for a long period of time.
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -307,7 +308,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
// Node created long time ago, and kubelet posted NotReady for a long period of time.
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -350,7 +351,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: time.Hour,
@@ -371,7 +372,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
// Node created long time ago, node controller posted Unknown for a short period of time.
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -414,7 +415,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: evictionTimeout - testNodeMonitorGracePeriod,
@@ -435,7 +436,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
// Node created long time ago, node controller posted Unknown for a long period of time.
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -478,7 +479,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: 60 * time.Minute,
@@ -518,7 +519,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
zones := getZones(item.fakeNodeHandler)
zones := testutil.GetZones(item.fakeNodeHandler)
for _, zone := range zones {
nodeController.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) {
nodeUid, _ := value.UID.(string)
@@ -562,7 +563,7 @@ func TestPodStatusChange(t *testing.T) {
// Node created long time ago, node controller posted Unknown for a long period of time.
table := []struct {
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
daemonSets []extensions.DaemonSet
timeToPass time.Duration
newNodeStatus v1.NodeStatus
@@ -572,7 +573,7 @@ func TestPodStatusChange(t *testing.T) {
description string
}{
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -615,7 +616,7 @@ func TestPodStatusChange(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
timeToPass: 60 * time.Minute,
newNodeStatus: v1.NodeStatus{
@@ -653,7 +654,7 @@ func TestPodStatusChange(t *testing.T) {
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
zones := getZones(item.fakeNodeHandler)
zones := testutil.GetZones(item.fakeNodeHandler)
for _, zone := range zones {
nodeController.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) {
nodeUid, _ := value.UID.(string)
@@ -764,13 +765,13 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
},
podList: []v1.Pod{*newPod("pod0", "node0")},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
unhealthyNodeNewStatus,
},
expectedInitialStates: map[string]zoneState{createZoneID("region1", "zone1"): stateFullDisruption},
expectedFollowingStates: map[string]zoneState{createZoneID("region1", "zone1"): stateFullDisruption},
expectedInitialStates: map[string]zoneState{testutil.CreateZoneID("region1", "zone1"): stateFullDisruption},
expectedFollowingStates: map[string]zoneState{testutil.CreateZoneID("region1", "zone1"): stateFullDisruption},
expectedEvictPods: false,
description: "Network Disruption: Only zone is down - eviction shouldn't take place.",
},
@@ -820,18 +821,18 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
podList: []v1.Pod{*newPod("pod0", "node0")},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
unhealthyNodeNewStatus,
},
expectedInitialStates: map[string]zoneState{
createZoneID("region1", "zone1"): stateFullDisruption,
createZoneID("region2", "zone2"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region2", "zone2"): stateFullDisruption,
},
expectedFollowingStates: map[string]zoneState{
createZoneID("region1", "zone1"): stateFullDisruption,
createZoneID("region2", "zone2"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region2", "zone2"): stateFullDisruption,
},
expectedEvictPods: false,
description: "Network Disruption: Both zones down - eviction shouldn't take place.",
@@ -881,18 +882,18 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
},
podList: []v1.Pod{*newPod("pod0", "node0")},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
healthyNodeNewStatus,
},
expectedInitialStates: map[string]zoneState{
createZoneID("region1", "zone1"): stateFullDisruption,
createZoneID("region1", "zone2"): stateNormal,
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone2"): stateNormal,
},
expectedFollowingStates: map[string]zoneState{
createZoneID("region1", "zone1"): stateFullDisruption,
createZoneID("region1", "zone2"): stateNormal,
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone2"): stateNormal,
},
expectedEvictPods: true,
description: "Network Disruption: One zone is down - eviction should take place.",
@@ -942,16 +943,16 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
},
podList: []v1.Pod{*newPod("pod0", "node0")},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
healthyNodeNewStatus,
},
expectedInitialStates: map[string]zoneState{
createZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
},
expectedFollowingStates: map[string]zoneState{
createZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
},
expectedEvictPods: false,
description: "NetworkDisruption: eviction should stop, only -master Node is healthy",
@@ -1002,18 +1003,18 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
podList: []v1.Pod{*newPod("pod0", "node0")},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
healthyNodeNewStatus,
},
expectedInitialStates: map[string]zoneState{
createZoneID("region1", "zone1"): stateFullDisruption,
createZoneID("region1", "zone2"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone2"): stateFullDisruption,
},
expectedFollowingStates: map[string]zoneState{
createZoneID("region1", "zone1"): stateFullDisruption,
createZoneID("region1", "zone2"): stateNormal,
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone2"): stateNormal,
},
expectedEvictPods: true,
description: "Initially both zones down, one comes back - eviction should take place",
@@ -1124,7 +1125,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
podList: []v1.Pod{*newPod("pod0", "node0")},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
unhealthyNodeNewStatus,
@@ -1133,10 +1134,10 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
healthyNodeNewStatus,
},
expectedInitialStates: map[string]zoneState{
createZoneID("region1", "zone1"): statePartialDisruption,
testutil.CreateZoneID("region1", "zone1"): statePartialDisruption,
},
expectedFollowingStates: map[string]zoneState{
createZoneID("region1", "zone1"): statePartialDisruption,
testutil.CreateZoneID("region1", "zone1"): statePartialDisruption,
},
expectedEvictPods: true,
description: "Zone is partially disrupted - eviction should take place.",
@@ -1144,7 +1145,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
}
for _, item := range table {
fakeNodeHandler := &FakeNodeHandler{
fakeNodeHandler := &testutil.FakeNodeHandler{
Existing: item.nodeList,
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: item.podList}),
}
@@ -1184,7 +1185,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
t.Errorf("%v: Unexpected zone state: %v: %v instead %v", item.description, zone, nodeController.zoneStates[zone], state)
}
}
zones := getZones(fakeNodeHandler)
zones := testutil.GetZones(fakeNodeHandler)
for _, zone := range zones {
nodeController.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) {
uid, _ := value.UID.(string)
@@ -1211,7 +1212,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
// pods and the node when kubelet has not reported, and the cloudprovider says
// the node is gone.
func TestCloudProviderNoRateLimit(t *testing.T) {
fnh := &FakeNodeHandler{
fnh := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1230,8 +1231,8 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node0")}}),
deleteWaitChan: make(chan struct{}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0"), *testutil.NewPod("pod1", "node0")}}),
DeleteWaitChan: make(chan struct{}),
}
nodeController, _ := NewNodeControllerFromClient(nil, fnh, 10*time.Minute,
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealtyThreshold,
@@ -1247,7 +1248,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
t.Errorf("unexpected error: %v", err)
}
select {
case <-fnh.deleteWaitChan:
case <-fnh.DeleteWaitChan:
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Timed out waiting %v for node to be deleted", wait.ForeverTestTimeout)
}
@@ -1262,7 +1263,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
table := []struct {
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
timeToPass time.Duration
newNodeStatus v1.NodeStatus
expectedEvictPods bool
@@ -1272,7 +1273,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
// Node created long time ago, without status:
// Expect Unknown status posted from node controller.
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1281,7 +1282,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedRequestCount: 2, // List+Update
expectedNodes: []*v1.Node{
@@ -1316,7 +1317,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
// Node created recently, without status.
// Expect no action from node controller (within startup grace period).
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1325,7 +1326,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedRequestCount: 1, // List
expectedNodes: nil,
@@ -1333,7 +1334,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
// Node created long time ago, with status updated by kubelet exceeds grace period.
// Expect Unknown status posted from node controller.
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1367,7 +1368,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedRequestCount: 3, // (List+)List+Update
timeToPass: time.Hour,
@@ -1432,7 +1433,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
// Node created long time ago, with status updated recently.
// Expect no action from node controller (within monitor grace period).
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1459,7 +1460,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedRequestCount: 1, // List
expectedNodes: nil,
@@ -1496,7 +1497,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
table := []struct {
fakeNodeHandler *FakeNodeHandler
fakeNodeHandler *testutil.FakeNodeHandler
timeToPass time.Duration
newNodeStatus v1.NodeStatus
expectedPodStatusUpdate bool
@@ -1504,7 +1505,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
// Node created recently, without status.
// Expect no action from node controller (within startup grace period).
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1513,14 +1514,14 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedPodStatusUpdate: false,
},
// Node created long time ago, with status updated recently.
// Expect no action from node controller (within monitor grace period).
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1547,14 +1548,14 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedPodStatusUpdate: false,
},
// Node created long time ago, with status updated by kubelet exceeds grace period.
// Expect pods status updated and Unknown node status posted from node controller
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1591,7 +1592,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
timeToPass: 1 * time.Minute,
newNodeStatus: v1.NodeStatus{
@@ -1624,7 +1625,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
// Node created long time ago, with outdated kubelet version 1.1.0 and status
// updated by kubelet exceeds grace period. Expect no action from node controller.
{
fakeNodeHandler: &FakeNodeHandler{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1661,7 +1662,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
timeToPass: 1 * time.Minute,
newNodeStatus: v1.NodeStatus{
@@ -1723,7 +1724,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
func TestNodeEventGeneration(t *testing.T) {
fakeNow := metav1.Date(2016, 9, 10, 12, 0, 0, 0, time.UTC)
fakeNodeHandler := &FakeNodeHandler{
fakeNodeHandler := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
@@ -1746,7 +1747,7 @@ func TestNodeEventGeneration(t *testing.T) {
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*newPod("pod0", "node0")}}),
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
}
nodeController, _ := NewNodeControllerFromClient(nil, fakeNodeHandler, 5*time.Minute,
@@ -1758,22 +1759,22 @@ func TestNodeEventGeneration(t *testing.T) {
return false, nil
}
nodeController.now = func() metav1.Time { return fakeNow }
fakeRecorder := NewFakeRecorder()
fakeRecorder := testutil.NewFakeRecorder()
nodeController.recorder = fakeRecorder
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(fakeRecorder.events) != 2 {
t.Fatalf("unexpected events, got %v, expected %v: %+v", len(fakeRecorder.events), 2, fakeRecorder.events)
if len(fakeRecorder.Events) != 2 {
t.Fatalf("unexpected events, got %v, expected %v: %+v", len(fakeRecorder.Events), 2, fakeRecorder.Events)
}
if fakeRecorder.events[0].Reason != "RegisteredNode" || fakeRecorder.events[1].Reason != "DeletingNode" {
if fakeRecorder.Events[0].Reason != "RegisteredNode" || fakeRecorder.Events[1].Reason != "DeletingNode" {
var reasons []string
for _, event := range fakeRecorder.events {
for _, event := range fakeRecorder.Events {
reasons = append(reasons, event.Reason)
}
t.Fatalf("unexpected events generation: %v", strings.Join(reasons, ","))
}
for _, event := range fakeRecorder.events {
for _, event := range fakeRecorder.Events {
involvedObject := event.InvolvedObject
actualUID := string(involvedObject.UID)
if actualUID != "1234567890" {

View File

@@ -0,0 +1,31 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["test_utils.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/watch:go_default_library",
],
)

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package node
package testutil
import (
"errors"
@@ -56,7 +56,7 @@ type FakeNodeHandler struct {
// Synchronization
lock sync.Mutex
deleteWaitChan chan struct{}
DeleteWaitChan chan struct{}
}
type FakeLegacyHandler struct {
@@ -64,7 +64,8 @@ type FakeLegacyHandler struct {
n *FakeNodeHandler
}
func (c *FakeNodeHandler) getUpdatedNodesCopy() []*v1.Node {
// GetUpdatedNodesCopy returns a slice of Nodes with updates applied.
func (c *FakeNodeHandler) GetUpdatedNodesCopy() []*v1.Node {
c.lock.Lock()
defer c.lock.Unlock()
updatedNodesCopy := make([]*v1.Node, len(c.UpdatedNodes), len(c.UpdatedNodes))
@@ -74,14 +75,17 @@ func (c *FakeNodeHandler) getUpdatedNodesCopy() []*v1.Node {
return updatedNodesCopy
}
// Core returns a fake CoreInterface.
func (c *FakeNodeHandler) Core() v1core.CoreV1Interface {
return &FakeLegacyHandler{c.Clientset.Core(), c}
}
// Nodes returns a fake NodeInterface.
func (m *FakeLegacyHandler) Nodes() v1core.NodeInterface {
return m.n
}
// Create adds a new Node to the fake store.
func (m *FakeNodeHandler) Create(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
@@ -102,6 +106,7 @@ func (m *FakeNodeHandler) Create(node *v1.Node) (*v1.Node, error) {
}
}
// Get returns a Node from the fake store.
func (m *FakeNodeHandler) Get(name string) (*v1.Node, error) {
m.lock.Lock()
defer func() {
@@ -123,6 +128,7 @@ func (m *FakeNodeHandler) Get(name string) (*v1.Node, error) {
return nil, nil
}
// List returns a list of Nodes from the fake store.
func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
m.lock.Lock()
defer func() {
@@ -152,23 +158,26 @@ func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
return nodeList, nil
}
// Delete deletes a Node from the fake store.
func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
m.lock.Lock()
defer func() {
m.RequestCount++
if m.deleteWaitChan != nil {
m.deleteWaitChan <- struct{}{}
if m.DeleteWaitChan != nil {
m.DeleteWaitChan <- struct{}{}
}
m.lock.Unlock()
}()
m.DeletedNodes = append(m.DeletedNodes, newNode(id))
m.DeletedNodes = append(m.DeletedNodes, NewNode(id))
return nil
}
// DeleteCollection deletes a collection of Nodes from the fake store.
func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts v1.ListOptions) error {
return nil
}
// Update updates a Node in the fake store.
func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
@@ -186,6 +195,7 @@ func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) {
return node, nil
}
// UpdateStatus updates the status of a Node in the fake store.
func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
@@ -197,15 +207,18 @@ func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
return node, nil
}
// PatchStatus patches the status of a Node in the fake store.
func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*v1.Node, error) {
m.RequestCount++
return &v1.Node{}, nil
}
// Watch watches Nodes in a fake store.
func (m *FakeNodeHandler) Watch(opts v1.ListOptions) (watch.Interface, error) {
return watch.NewFake(), nil
}
// Patch patches a Node in the fake store.
func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
return nil, nil
}
@@ -213,18 +226,21 @@ func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subr
// FakeRecorder is used as a fake during testing.
type FakeRecorder struct {
source v1.EventSource
events []*v1.Event
Events []*v1.Event
clock clock.Clock
}
// Event emits a fake event to the fake recorder
func (f *FakeRecorder) Event(obj runtime.Object, eventtype, reason, message string) {
f.generateEvent(obj, metav1.Now(), eventtype, reason, message)
}
// Eventf emits a fake formatted event to the fake recorder
func (f *FakeRecorder) Eventf(obj runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
f.Event(obj, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}
// PastEventf is a no-op
func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
}
@@ -235,9 +251,9 @@ func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time,
}
event := f.makeEvent(ref, eventtype, reason, message)
event.Source = f.source
if f.events != nil {
if f.Events != nil {
fmt.Println("write event")
f.events = append(f.events, event)
f.Events = append(f.Events, event)
}
}
@@ -263,15 +279,17 @@ func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, mes
}
}
// NewFakeRecorder returns a pointer to a newly constructed FakeRecorder.
func NewFakeRecorder() *FakeRecorder {
return &FakeRecorder{
source: v1.EventSource{Component: "nodeControllerTest"},
events: []*v1.Event{},
Events: []*v1.Event{},
clock: clock.NewFakeClock(time.Now()),
}
}
func newNode(name string) *v1.Node {
// NewNode is a helper function for creating Nodes for testing.
func NewNode(name string) *v1.Node {
return &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{
@@ -286,7 +304,8 @@ func newNode(name string) *v1.Node {
}
}
func newPod(name, host string) *v1.Pod {
// NewPod is a helper function for creating Pods for testing.
func NewPod(name, host string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
@@ -317,8 +336,8 @@ func contains(node *v1.Node, nodes []*v1.Node) bool {
return false
}
// Returns list of zones for all Nodes stored in FakeNodeHandler
func getZones(nodeHandler *FakeNodeHandler) []string {
// GetZones returns a list of zones for all Nodes stored in FakeNodeHandler
func GetZones(nodeHandler *FakeNodeHandler) []string {
nodes, _ := nodeHandler.List(v1.ListOptions{})
zones := sets.NewString()
for _, node := range nodes.Items {
@@ -327,6 +346,7 @@ func getZones(nodeHandler *FakeNodeHandler) []string {
return zones.List()
}
func createZoneID(region, zone string) string {
// CreateZoneID returns a single zoneID for a given region and zone.
func CreateZoneID(region, zone string) string {
return region + ":\x00:" + zone
}
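
Usage note (illustrative, not part of the commit): the NUL byte in the separator keeps the composite key unambiguous even if a region or zone name itself contains ":", which is why the tests above can key their expected zone states by a single string. A minimal sketch:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/controller/node/testutil"
)

func main() {
	// Prints "region1:\x00:zone1" (quoted so the NUL separator is visible).
	fmt.Printf("%q\n", testutil.CreateZoneID("region1", "zone1"))
}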