fix go vet errors

Signed-off-by: Jess Frazelle <jessfraz@google.com>

fix composites

Signed-off-by: Jess Frazelle <me@jessfraz.com>
pull/6/head
Jess Frazelle 2016-07-25 14:34:05 -07:00 committed by Jess Frazelle
parent eef8bfec23
commit 7e9d82129e
GPG Key ID: 18F3685C0022BFF3
27 changed files with 249 additions and 235 deletions
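
Nearly every hunk below makes the same fix: go vet's composites check flags composite literals that initialize struct types imported from another package using unkeyed fields, so the literals are rewritten with explicit field names. A minimal sketch of the before and after (the LoadBalancerIngress stand-in here is a local hypothetical, not the real k8s.io type):

```go
package main

import "fmt"

// Stand-in for an imported API struct such as v1.LoadBalancerIngress.
// (The composites check only fires for struct types imported from
// another package, so in real code this type would live elsewhere.)
type LoadBalancerIngress struct {
	IP       string
	Hostname string
}

func main() {
	// Unkeyed: go vet reports "composite literal uses unkeyed fields",
	// and the literal silently breaks if the struct gains or reorders fields.
	bad := LoadBalancerIngress{"ip1", ""}

	// Keyed, as used throughout this commit: robust to field reordering,
	// and zero-valued fields such as Hostname can simply be omitted.
	good := LoadBalancerIngress{IP: "ip1"}

	fmt.Println(bad, good)
}
```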


@ -62,7 +62,7 @@ func TestProcessServiceUpdate(t *testing.T) {
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": {Ingress: []v1.LoadBalancerIngress{{"ip1", ""}}},
"foo1": {Ingress: []v1.LoadBalancerIngress{{IP: "ip1", Hostname: ""}}},
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
@ -77,7 +77,7 @@ func TestProcessServiceUpdate(t *testing.T) {
ObjectMeta: v1.ObjectMeta{Name: "bar1"},
},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo2": {Ingress: []v1.LoadBalancerIngress{{"ip1", ""}}},
"foo2": {Ingress: []v1.LoadBalancerIngress{{IP: "ip1", Hostname: ""}}},
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
@ -127,7 +127,7 @@ func TestProcessServiceDeletion(t *testing.T) {
&cachedService{
lastState: &v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},
serviceStatusMap: map[string]v1.LoadBalancerStatus{
"foo1": {Ingress: []v1.LoadBalancerIngress{{"ip1", ""}}},
"foo1": {Ingress: []v1.LoadBalancerIngress{{IP: "ip1", Hostname: ""}}},
},
},
&v1.Service{Status: v1.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}},


@ -42,8 +42,8 @@ for arg; do
done
if [[ ${#targets[@]} -eq 0 ]]; then
# Do not run on third_party directories.
targets=$(go list -e ./... | egrep -v "/(third_party|vendor|staging)/")
# Do not run on third_party directories or generated client code.
targets=$(go list -e ./... | egrep -v "/(third_party|vendor|staging|clientset_generated)/")
fi
go vet "${goflags[@]:+${goflags[@]}}" ${targets[@]}


@ -2209,8 +2209,8 @@ func TestValidateHandler(t *testing.T) {
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromInt(1), Host: "", Scheme: "HTTP"}},
{HTTPGet: &api.HTTPGetAction{Path: "/foo", Port: intstr.FromInt(65535), Host: "host", Scheme: "HTTP"}},
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP"}},
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{"Host", "foo.example.com"}}}},
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{"X-Forwarded-For", "1.2.3.4"}, {"X-Forwarded-For", "5.6.7.8"}}}},
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{Name: "Host", Value: "foo.example.com"}}}},
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{Name: "X-Forwarded-For", Value: "1.2.3.4"}, {Name: "X-Forwarded-For", Value: "5.6.7.8"}}}},
}
for _, h := range successCases {
if errs := validateHandler(&h, field.NewPath("field")); len(errs) != 0 {
@ -2224,8 +2224,8 @@ func TestValidateHandler(t *testing.T) {
{HTTPGet: &api.HTTPGetAction{Path: "", Port: intstr.FromInt(0), Host: ""}},
{HTTPGet: &api.HTTPGetAction{Path: "/foo", Port: intstr.FromInt(65536), Host: "host"}},
{HTTPGet: &api.HTTPGetAction{Path: "", Port: intstr.FromString(""), Host: ""}},
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{"Host:", "foo.example.com"}}}},
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{"X_Forwarded_For", "foo.example.com"}}}},
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{Name: "Host:", Value: "foo.example.com"}}}},
{HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{Name: "X_Forwarded_For", Value: "foo.example.com"}}}},
}
for _, h := range errorCases {
if errs := validateHandler(&h, field.NewPath("field")); len(errs) == 0 {


@ -286,14 +286,14 @@ func TestReflectorListAndWatchWithErrors(t *testing.T) {
{
list: mkList("1"),
events: []watch.Event{
{watch.Added, mkPod("foo", "2")},
{watch.Added, mkPod("bar", "3")},
{Type: watch.Added, Object: mkPod("foo", "2")},
{Type: watch.Added, Object: mkPod("bar", "3")},
},
}, {
list: mkList("3", mkPod("foo", "2"), mkPod("bar", "3")),
events: []watch.Event{
{watch.Deleted, mkPod("foo", "4")},
{watch.Added, mkPod("qux", "5")},
{Type: watch.Deleted, Object: mkPod("foo", "4")},
{Type: watch.Added, Object: mkPod("qux", "5")},
},
}, {
listErr: fmt.Errorf("a list error"),
@ -303,7 +303,7 @@ func TestReflectorListAndWatchWithErrors(t *testing.T) {
}, {
list: mkList("5", mkPod("bar", "3"), mkPod("qux", "5")),
events: []watch.Event{
{watch.Added, mkPod("baz", "6")},
{Type: watch.Added, Object: mkPod("baz", "6")},
},
}, {
list: mkList("6", mkPod("bar", "3"), mkPod("qux", "5"), mkPod("baz", "6")),


@ -154,17 +154,17 @@ func TestGetServerResources(t *testing.T) {
stable := unversioned.APIResourceList{
GroupVersion: "v1",
APIResources: []unversioned.APIResource{
{"pods", true, "Pod"},
{"services", true, "Service"},
{"namespaces", false, "Namespace"},
{Name: "pods", Namespaced: true, Kind: "Pod"},
{Name: "services", Namespaced: true, Kind: "Service"},
{Name: "namespaces", Namespaced: false, Kind: "Namespace"},
},
}
beta := unversioned.APIResourceList{
GroupVersion: "extensions/v1",
APIResources: []unversioned.APIResource{
{"deployments", true, "Deployment"},
{"ingresses", true, "Ingress"},
{"jobs", true, "Job"},
{Name: "deployments", Namespaced: true, Kind: "Deployment"},
{Name: "ingresses", Namespaced: true, Kind: "Ingress"},
{Name: "jobs", Namespaced: true, Kind: "Job"},
},
}
tests := []struct {


@ -314,7 +314,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
metrics := heapster.MetricResultList{}
for _, level := range tc.reportedLevels {
metric := heapster.MetricResult{
Metrics: []heapster.MetricPoint{{timestamp, level, nil}},
Metrics: []heapster.MetricPoint{{Timestamp: timestamp, Value: level, FloatValue: nil}},
LatestTimestamp: timestamp,
}
metrics.Items = append(metrics.Items, metric)


@ -442,9 +442,9 @@ func TestAggregateSum(t *testing.T) {
Items: []heapster.MetricResult{
{
Metrics: []heapster.MetricPoint{
{now, 50, nil},
{now.Add(-15 * time.Second), 100, nil},
{now.Add(-60 * time.Second), 100000, nil}},
{Timestamp: now, Value: 50, FloatValue: nil},
{Timestamp: now.Add(-15 * time.Second), Value: 100, FloatValue: nil},
{Timestamp: now.Add(-60 * time.Second), Value: 100000, FloatValue: nil}},
LatestTimestamp: now,
},
},
@ -461,8 +461,8 @@ func TestAggregateSumSingle(t *testing.T) {
Items: []heapster.MetricResult{
{
Metrics: []heapster.MetricPoint{
{now, 50, nil},
{now.Add(-65 * time.Second), 100000, nil}},
{Timestamp: now, Value: 50, FloatValue: nil},
{Timestamp: now.Add(-65 * time.Second), Value: 100000, FloatValue: nil}},
LatestTimestamp: now,
},
},


@ -37,6 +37,9 @@ func (r *TestRuntimeCache) GetCachedPods() []*Pod {
}
func NewTestRuntimeCache(getter podsGetter) *TestRuntimeCache {
c, _ := NewRuntimeCache(getter)
return &TestRuntimeCache{*c.(*runtimeCache)}
return &TestRuntimeCache{
runtimeCache: runtimeCache{
getter: getter,
},
}
}


@ -604,9 +604,9 @@ func (dm *DockerManager) runContainer(
// Experimental. For now, we hardcode /dev/nvidia0 no matter what the user asks for
// (we only support one device per node).
devices = []dockercontainer.DeviceMapping{
{"/dev/nvidia0", "/dev/nvidia0", "mrw"},
{"/dev/nvidiactl", "/dev/nvidiactl", "mrw"},
{"/dev/nvidia-uvm", "/dev/nvidia-uvm", "mrw"},
{PathOnHost: "/dev/nvidia0", PathInContainer: "/dev/nvidia0", CgroupPermissions: "mrw"},
{PathOnHost: "/dev/nvidiactl", PathInContainer: "/dev/nvidiactl", CgroupPermissions: "mrw"},
{PathOnHost: "/dev/nvidia-uvm", PathInContainer: "/dev/nvidia-uvm", CgroupPermissions: "mrw"},
}
}
podHasSELinuxLabel := pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxOptions != nil


@ -277,25 +277,33 @@ func TestPullWithSecrets(t *testing.T) {
"default keyring secrets": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email", nil}}),
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
"index.docker.io/v1/": {Username: "built-in", Password: "password", Email: "email", Provider: nil},
}),
[]string{`ubuntu:latest using {"username":"built-in","password":"password","email":"email"}`},
},
"default keyring secrets unused": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"extraneous": {"built-in", "password", "email", nil}}),
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
"extraneous": {Username: "built-in", Password: "password", Email: "email", Provider: nil},
}),
[]string{`ubuntu:latest using {}`},
},
"builtin keyring secrets, but use passed": {
"ubuntu",
[]api.Secret{{Type: api.SecretTypeDockercfg, Data: map[string][]byte{api.DockerConfigKey: dockercfgContent}}},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email", nil}}),
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
"index.docker.io/v1/": {Username: "built-in", Password: "password", Email: "email", Provider: nil},
}),
[]string{`ubuntu:latest using {"username":"passed-user","password":"passed-password","email":"passed-email"}`},
},
"builtin keyring secrets, but use passed with new docker config": {
"ubuntu",
[]api.Secret{{Type: api.SecretTypeDockerConfigJson, Data: map[string][]byte{api.DockerConfigJsonKey: dockerConfigJsonContent}}},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email", nil}}),
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
"index.docker.io/v1/": {Username: "built-in", Password: "password", Email: "email", Provider: nil},
}),
[]string{`ubuntu:latest using {"username":"passed-user","password":"passed-password","email":"passed-email"}`},
},
}


@ -108,7 +108,7 @@ func TestParallelPuller(t *testing.T) {
fakeRecorder := &record.FakeRecorder{}
puller := NewImageManager(fakeRecorder, fakeRuntime, backOff, false)
fakeRuntime.ImageList = []Image{{"present_image", nil, nil, 1}}
fakeRuntime.ImageList = []Image{{ID: "present_image", Size: 1}}
fakeRuntime.Err = c.pullerErr
fakeRuntime.InspectErr = c.inspectErr


@ -945,32 +945,32 @@ func TestMakeVolumeMounts(t *testing.T) {
expectedMounts := []kubecontainer.Mount{
{
"disk",
"/etc/hosts",
"/mnt/disk",
false,
false,
Name: "disk",
ContainerPath: "/etc/hosts",
HostPath: "/mnt/disk",
ReadOnly: false,
SELinuxRelabel: false,
},
{
"disk",
"/mnt/path3",
"/mnt/disk",
true,
false,
Name: "disk",
ContainerPath: "/mnt/path3",
HostPath: "/mnt/disk",
ReadOnly: true,
SELinuxRelabel: false,
},
{
"disk4",
"/mnt/path4",
"/mnt/host",
false,
false,
Name: "disk4",
ContainerPath: "/mnt/path4",
HostPath: "/mnt/host",
ReadOnly: false,
SELinuxRelabel: false,
},
{
"disk5",
"/mnt/path5",
"/var/lib/kubelet/podID/volumes/empty/disk5",
false,
false,
Name: "disk5",
ContainerPath: "/mnt/path5",
HostPath: "/var/lib/kubelet/podID/volumes/empty/disk5",
ReadOnly: false,
SELinuxRelabel: false,
},
}
if !reflect.DeepEqual(mounts, expectedMounts) {


@ -175,16 +175,16 @@ func TestHTTPHeaders(t *testing.T) {
}{
{[]api.HTTPHeader{}, http.Header{}},
{[]api.HTTPHeader{
{"X-Muffins-Or-Cupcakes", "Muffins"},
{Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"},
}, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"}}},
{[]api.HTTPHeader{
{"X-Muffins-Or-Cupcakes", "Muffins"},
{"X-Muffins-Or-Plumcakes", "Muffins!"},
{Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"},
{Name: "X-Muffins-Or-Plumcakes", Value: "Muffins!"},
}, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"},
"X-Muffins-Or-Plumcakes": {"Muffins!"}}},
{[]api.HTTPHeader{
{"X-Muffins-Or-Cupcakes", "Muffins"},
{"X-Muffins-Or-Cupcakes", "Cupcakes, too"},
{Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"},
{Name: "X-Muffins-Or-Cupcakes", Value: "Cupcakes, too"},
}, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins", "Cupcakes, too"}}},
}
for _, test := range testCases {


@ -860,7 +860,7 @@ func baseApp(t *testing.T) *appctypes.App {
SupplementaryGIDs: []int{4, 5, 6},
WorkingDirectory: "/foo",
Environment: []appctypes.EnvironmentVariable{
{"env-foo", "bar"},
{Name: "env-foo", Value: "bar"},
},
MountPoints: []appctypes.MountPoint{
{Name: *appctypes.MustACName("mnt-foo"), Path: "/mnt-foo", ReadOnly: false},
@ -997,7 +997,7 @@ func TestSetApp(t *testing.T) {
SupplementaryGIDs: []int{4, 5, 6},
WorkingDirectory: "/foo",
Environment: []appctypes.EnvironmentVariable{
{"env-foo", "bar"},
{Name: "env-foo", Value: "bar"},
},
MountPoints: []appctypes.MountPoint{
{Name: *appctypes.MustACName("mnt-foo"), Path: "/mnt-foo", ReadOnly: false},
@ -1056,8 +1056,8 @@ func TestSetApp(t *testing.T) {
SupplementaryGIDs: []int{1, 2, 3, 4},
WorkingDirectory: tmpDir,
Environment: []appctypes.EnvironmentVariable{
{"env-foo", "bar"},
{"env-bar", "foo"},
{Name: "env-foo", Value: "bar"},
{Name: "env-bar", Value: "foo"},
},
MountPoints: []appctypes.MountPoint{
{Name: *appctypes.MustACName("mnt-foo"), Path: "/mnt-foo", ReadOnly: false},
@ -1120,8 +1120,8 @@ func TestSetApp(t *testing.T) {
SupplementaryGIDs: []int{1, 2, 3, 4},
WorkingDirectory: tmpDir,
Environment: []appctypes.EnvironmentVariable{
{"env-foo", "foo"},
{"env-bar", "bar"},
{Name: "env-foo", Value: "foo"},
{Name: "env-bar", Value: "bar"},
},
MountPoints: []appctypes.MountPoint{
{Name: *appctypes.MustACName("mnt-foo"), Path: "/mnt-bar", ReadOnly: true},


@ -229,9 +229,9 @@ func TestFindExternalAddress(t *testing.T) {
expectedIP := "172.0.0.1"
nodes := []*api.Node{new(api.Node), new(api.Node), new(api.Node)}
nodes[0].Status.Addresses = []api.NodeAddress{{"ExternalIP", expectedIP}}
nodes[1].Status.Addresses = []api.NodeAddress{{"LegacyHostIP", expectedIP}}
nodes[2].Status.Addresses = []api.NodeAddress{{"ExternalIP", expectedIP}, {"LegacyHostIP", "172.0.0.2"}}
nodes[0].Status.Addresses = []api.NodeAddress{{Type: "ExternalIP", Address: expectedIP}}
nodes[1].Status.Addresses = []api.NodeAddress{{Type: "LegacyHostIP", Address: expectedIP}}
nodes[2].Status.Addresses = []api.NodeAddress{{Type: "ExternalIP", Address: expectedIP}, {Type: "LegacyHostIP", Address: "172.0.0.2"}}
// Pass Case
for _, node := range nodes {


@ -106,11 +106,9 @@ type fakeNeverRateLimiter struct {
}
func NewFakeNeverRateLimiter() RateLimiter {
wg := sync.WaitGroup{}
wg.Add(1)
return &fakeNeverRateLimiter{
wg: wg,
}
rl := fakeNeverRateLimiter{}
rl.wg.Add(1)
return &rl
}
func (t *fakeNeverRateLimiter) TryAccept() bool {
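
The rate-limiter hunk above fixes a different class of vet error: building the struct literal from an existing sync.WaitGroup copies the WaitGroup's internal lock state, which the copylocks check reports. A self-contained sketch of the pattern, with illustrative names:

```go
package main

import "sync"

// Illustrative stand-in for the fake rate limiter above.
type limiter struct {
	wg sync.WaitGroup
}

// Before: the literal copies the lock state held inside wg, and
// go vet reports "literal copies lock value from wg".
func newLimiterBad() *limiter {
	wg := sync.WaitGroup{}
	wg.Add(1)
	return &limiter{wg: wg}
}

// After: construct the struct first, then call Add on the field in
// place, so no WaitGroup value is ever copied.
func newLimiterGood() *limiter {
	l := limiter{}
	l.wg.Add(1)
	return &l
}

func main() {
	_ = newLimiterBad
	_ = newLimiterGood()
}
```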


@ -69,9 +69,9 @@ func DialURL(url *url.URL, transport http.RoundTripper) (net.Conn, error) {
inferredHost = host
}
// Make a copy to avoid polluting the provided config
tlsConfigCopy := *tlsConfig
tlsConfigCopy, _ := utilnet.TLSClientConfig(transport)
tlsConfigCopy.ServerName = inferredHost
tlsConfig = &tlsConfigCopy
tlsConfig = tlsConfigCopy
}
tlsConn = tls.Client(netConn, tlsConfig)
if err := tlsConn.Handshake(); err != nil {
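
The DialURL change above addresses the same copylocks check from another angle: dereferencing the config to copy it by value duplicated the mutex that crypto/tls.Config carried at the time, so the copy now comes from the utilnet.TLSClientConfig helper instead. A reduced illustration (the config type below is a stand-in, not tls.Config):

```go
package main

import "sync"

// Stand-in for a config struct that, like crypto/tls.Config at the
// time of this commit, holds a mutex alongside its plain fields.
type config struct {
	ServerName string
	mu         sync.RWMutex
}

func main() {
	orig := &config{ServerName: "example.com"}

	// go vet (copylocks) reports "assignment copies lock value": the
	// mutex inside orig is duplicated along with the rest of the struct.
	dup := *orig
	dup.ServerName = "other.example.com"
	_ = dup
}
```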


@ -217,7 +217,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{"foo", 80, api.ProtocolTCP}},
Ports: []api.EndpointPort{{Name: "foo", Port: 80, Protocol: api.ProtocolTCP}},
}},
}


@ -86,7 +86,7 @@ func fakeFilepathGlob(devicePath string) (globs []string, err error) {
return []string{devicePath}, nil
}
func TestextractTransportname(t *testing.T) {
func TestExtractTransportname(t *testing.T) {
fakeIscsiadmOutput := []string{
"# BEGIN RECORD 2.0-873\n" +
"iface.iscsi_ifacename = default\n" +


@ -721,9 +721,16 @@ func VerifyZeroDetachCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
// manager and fake volume plugin using a fake volume host.
func GetTestVolumePluginMgr(
t *testing.T) (*VolumePluginMgr, *FakeVolumePlugin) {
v := NewFakeVolumeHost(
"", /* rootDir */
nil, /* kubeClient */
nil, /* plugins */
"", /* rootContext */
)
plugins := ProbeVolumePlugins(VolumeConfig{})
volumePluginMgr := NewFakeVolumeHost(
"" /* rootDir */, nil /* kubeClient */, plugins, "" /* rootContext */).pluginMgr
if err := v.pluginMgr.InitPlugins(plugins, v); err != nil {
t.Fatal(err)
}
return &volumePluginMgr, plugins[0].(*FakeVolumePlugin)
return &v.pluginMgr, plugins[0].(*FakeVolumePlugin)
}


@ -112,7 +112,7 @@ func TestFindMultipathDeviceForDevice(t *testing.T) {
}
}
func TestfindDeviceForPath(t *testing.T) {
func TestFindDeviceForPath(t *testing.T) {
io := &mockOsIOHandler{}
disk, err := findDeviceForPath("/dev/sde", io)


@ -263,7 +263,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
test: "all machines are same priority as Affinity is nil",
},
// the node(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
@ -281,7 +281,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
"which doesn't match either pods in nodes or in topology key",
},
@ -299,7 +299,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
},
// there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference.
@ -323,7 +323,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 5}, {"machine3", 10}, {"machine4", 10}, {"machine5", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}},
test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
},
// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
@ -339,7 +339,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 2}, {"machine2", 10}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
},
// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
@ -355,7 +355,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
},
{
@ -369,7 +369,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
},
@ -389,7 +389,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
},
{
@ -402,7 +402,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
},
{
@ -416,7 +416,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
},
// Test the symmetry cases for anti affinity
@ -430,7 +430,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
},
// Test both affinity and anti-affinity
@ -444,7 +444,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
},
// Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service),
@ -469,7 +469,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 4}, {"machine3", 10}, {"machine4", 10}, {"machine5", 4}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}},
test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
},
// Consider Affinity, Anti Affinity and symmetry together.
@ -491,7 +491,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 10}, {"machine4", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}},
test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
},
}
@ -565,7 +565,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
},
{
@ -580,7 +580,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
hardPodAffinityWeight: 0,
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
test: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match",
},
}
@ -650,7 +650,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
},
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, match among topologyKeys indicated by failure domains.",
},
{
@ -664,7 +664,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
},
failureDomains: priorityutil.Topologies{},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, and no failure domains indicated, regard as topologyKey not match.",
},
}


@ -107,7 +107,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
test: "all machines are same priority as NodeAffinity is nil",
},
{
@ -121,7 +121,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
test: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
},
{
@ -135,7 +135,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
test: "only machine1 matches the preferred scheduling requirements of pod",
},
{
@ -149,7 +149,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: label5}},
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 1}, {"machine5", 10}, {"machine2", 3}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}},
test: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
},
}


@ -258,7 +258,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
test: "nothing scheduled, nothing requested",
},
{
@ -275,7 +275,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 3}, {"machine2", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}},
test: "nothing scheduled, resources requested, differently sized machines",
},
{
@ -292,7 +292,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
test: "no resources requested, pods scheduled",
pods: []*api.Pod{
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@ -315,7 +315,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}},
test: "no resources requested, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@ -338,7 +338,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 4}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}},
test: "resources requested, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -359,7 +359,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 6}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -380,7 +380,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuOnly},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 2}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}},
test: "requested resources exceed node capacity",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -390,7 +390,7 @@ func TestLeastRequested(t *testing.T) {
{
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "zero node resources, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -428,7 +428,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
label: "baz",
presence: true,
test: "no match found, presence true",
@ -439,7 +439,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
label: "baz",
presence: false,
test: "no match found, presence false",
@ -450,7 +450,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
label: "foo",
presence: true,
test: "one match found, presence true",
@ -461,7 +461,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
label: "foo",
presence: false,
test: "one match found, presence false",
@ -472,7 +472,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
label: "bar",
presence: true,
test: "two matches found, presence true",
@ -483,7 +483,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
label: "bar",
presence: false,
test: "two matches found, presence false",
@ -591,7 +591,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
test: "nothing scheduled, nothing requested",
},
{
@ -608,7 +608,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 10}},
test: "nothing scheduled, resources requested, differently sized machines",
},
{
@ -625,7 +625,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
test: "no resources requested, pods scheduled",
pods: []*api.Pod{
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@ -648,7 +648,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 4}, {"machine2", 6}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 6}},
test: "no resources requested, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@ -671,7 +671,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 9}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 9}},
test: "resources requested, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -692,7 +692,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 6}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -713,7 +713,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuOnly},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "requested resources exceed node capacity",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -723,7 +723,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
{
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "zero node resources, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -841,7 +841,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: (250M-23M)/97.7M + 1 = 3
pod: &api.Pod{Spec: test_40_250},
nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
expectedList: []schedulerapi.HostPriority{{"machine1", 1}, {"machine2", 3}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}},
test: "two images spread on two nodes, prefer the larger image one",
},
{
@ -856,7 +856,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0
pod: &api.Pod{Spec: test_40_140},
nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
expectedList: []schedulerapi.HostPriority{{"machine1", 2}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}},
test: "two images on one node, prefer this node",
},
{
@ -871,7 +871,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 10 < min score = 0
pod: &api.Pod{Spec: test_min_max},
nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
test: "if exceed limit, use limit",
},
}
@ -1018,7 +1018,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
test: "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
},
{
@ -1038,7 +1038,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}},
test: "pod managed by ReplicaSet should avoid a node, this node get lowest priority score",
},
{
@ -1054,7 +1054,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
test: "pod should not avoid these nodes, all nodes get highest priority score",
},
}


@ -66,14 +66,14 @@ func TestSelectorSpreadPriority(t *testing.T) {
{
pod: new(api.Pod),
nodes: []string{"machine1", "machine2"},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
test: "nothing scheduled",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*api.Pod{{Spec: zone1Spec}},
nodes: []string{"machine1", "machine2"},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
test: "no services",
},
{
@ -81,7 +81,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
test: "different services",
},
{
@ -92,7 +92,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
test: "two pods, one service pod",
},
{
@ -106,7 +106,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
test: "five pods, one service pod in no namespace",
},
{
@ -119,7 +119,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
test: "four pods, one service pod in default namespace",
},
{
@ -133,7 +133,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
test: "five pods, one service pod in specific namespace",
},
{
@ -145,7 +145,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "three pods, two service pods on different machines",
},
{
@ -158,7 +158,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 0}},
test: "four pods, three service pods",
},
{
@ -170,7 +170,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
test: "service with partial pod label matches",
},
{
@ -185,7 +185,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to
// do spreading between all pods. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
test: "service with partial pod label matches with service and replication controller",
},
{
@ -199,7 +199,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
test: "service with partial pod label matches with service and replica set",
},
{
@ -213,7 +213,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
// Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
test: "disjoined service and replication controller should be treated equally",
},
{
@ -227,7 +227,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
test: "disjoined service and replica set should be treated equally",
},
{
@ -240,7 +240,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
// Both Nodes have one pod from the given RC, hence both get 0 score.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "Replication controller with partial pod label matches",
},
{
@ -253,7 +253,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "Replica set with partial pod label matches",
},
{
@ -265,7 +265,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
test: "Another replication controller with partial pod label matches",
},
{
@ -278,7 +278,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
test: "Another replication set with partial pod label matches",
},
}
@ -353,12 +353,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
{
pod: new(api.Pod),
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 10},
{nodeMachine1Zone2, 10},
{nodeMachine2Zone2, 10},
{nodeMachine1Zone3, 10},
{nodeMachine2Zone3, 10},
{nodeMachine3Zone3, 10},
{Host: nodeMachine1Zone1, Score: 10},
{Host: nodeMachine1Zone2, Score: 10},
{Host: nodeMachine2Zone2, Score: 10},
{Host: nodeMachine1Zone3, Score: 10},
{Host: nodeMachine2Zone3, Score: 10},
{Host: nodeMachine3Zone3, Score: 10},
},
test: "nothing scheduled",
},
@ -366,12 +366,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
pod: buildPod("", labels1, nil),
pods: []*api.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 10},
{nodeMachine1Zone2, 10},
{nodeMachine2Zone2, 10},
{nodeMachine1Zone3, 10},
{nodeMachine2Zone3, 10},
{nodeMachine3Zone3, 10},
{Host: nodeMachine1Zone1, Score: 10},
{Host: nodeMachine1Zone2, Score: 10},
{Host: nodeMachine2Zone2, Score: 10},
{Host: nodeMachine1Zone3, Score: 10},
{Host: nodeMachine2Zone3, Score: 10},
{Host: nodeMachine3Zone3, Score: 10},
},
test: "no services",
},
@ -380,12 +380,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
pods: []*api.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 10},
{nodeMachine1Zone2, 10},
{nodeMachine2Zone2, 10},
{nodeMachine1Zone3, 10},
{nodeMachine2Zone3, 10},
{nodeMachine3Zone3, 10},
{Host: nodeMachine1Zone1, Score: 10},
{Host: nodeMachine1Zone2, Score: 10},
{Host: nodeMachine2Zone2, Score: 10},
{Host: nodeMachine1Zone3, Score: 10},
{Host: nodeMachine2Zone3, Score: 10},
{Host: nodeMachine3Zone3, Score: 10},
},
test: "different services",
},
@ -397,12 +397,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 10},
{nodeMachine1Zone2, 0}, // Already have pod on machine
{nodeMachine2Zone2, 3}, // Already have pod in zone
{nodeMachine1Zone3, 10},
{nodeMachine2Zone3, 10},
{nodeMachine3Zone3, 10},
{Host: nodeMachine1Zone1, Score: 10},
{Host: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
{Host: nodeMachine2Zone2, Score: 3}, // Already have pod in zone
{Host: nodeMachine1Zone3, Score: 10},
{Host: nodeMachine2Zone3, Score: 10},
{Host: nodeMachine3Zone3, Score: 10},
},
test: "two pods, 1 matching (in z2)",
},
@ -417,12 +417,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 10},
{nodeMachine1Zone2, 0}, // Pod on node
{nodeMachine2Zone2, 0}, // Pod on node
{nodeMachine1Zone3, 6}, // Pod in zone
{nodeMachine2Zone3, 3}, // Pod on node
{nodeMachine3Zone3, 6}, // Pod in zone
{Host: nodeMachine1Zone1, Score: 10},
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
{Host: nodeMachine2Zone2, Score: 0}, // Pod on node
{Host: nodeMachine1Zone3, Score: 6}, // Pod in zone
{Host: nodeMachine2Zone3, Score: 3}, // Pod on node
{Host: nodeMachine3Zone3, Score: 6}, // Pod in zone
},
test: "five pods, 3 matching (z2=2, z3=1)",
},
@ -436,12 +436,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 0}, // Pod on node
{nodeMachine1Zone2, 0}, // Pod on node
{nodeMachine2Zone2, 3}, // Pod in zone
{nodeMachine1Zone3, 0}, // Pod on node
{nodeMachine2Zone3, 3}, // Pod in zone
{nodeMachine3Zone3, 3}, // Pod in zone
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
{Host: nodeMachine2Zone2, Score: 3}, // Pod in zone
{Host: nodeMachine1Zone3, Score: 0}, // Pod on node
{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
},
test: "four pods, 3 matching (z1=1, z2=1, z3=1)",
},
@@ -455,12 +455,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 0}, // Pod on node
{nodeMachine1Zone2, 0}, // Pod on node
{nodeMachine2Zone2, 3}, // Pod in zone
{nodeMachine1Zone3, 0}, // Pod on node
{nodeMachine2Zone3, 3}, // Pod in zone
{nodeMachine3Zone3, 3}, // Pod in zone
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
{Host: nodeMachine2Zone2, Score: 3}, // Pod in zone
{Host: nodeMachine1Zone3, Score: 0}, // Pod on node
{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
},
test: "four pods, 3 matching (z1=1, z2=1, z3=1)",
},
@@ -480,12 +480,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
// We would probably prefer to see a bigger gap between putting a second
// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct.
// This is also consistent with what we have already.
{nodeMachine1Zone1, 10}, // No pods in zone
{nodeMachine1Zone2, 5}, // Pod on node
{nodeMachine2Zone2, 6}, // Pod in zone
{nodeMachine1Zone3, 0}, // Two pods on node
{nodeMachine2Zone3, 3}, // Pod in zone
{nodeMachine3Zone3, 3}, // Pod in zone
{Host: nodeMachine1Zone1, Score: 10}, // No pods in zone
{Host: nodeMachine1Zone2, Score: 5}, // Pod on node
{Host: nodeMachine2Zone2, Score: 6}, // Pod in zone
{Host: nodeMachine1Zone3, Score: 0}, // Two pods on node
{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
},
test: "Replication controller spreading (z1=0, z2=1, z3=2)",
},
@@ -555,18 +555,18 @@ func TestZoneSpreadPriority(t *testing.T) {
{
pod: new(api.Pod),
nodes: labeledNodes,
expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10},
{"machine21", 10}, {"machine22", 10},
{"machine01", 0}, {"machine02", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
test: "nothing scheduled",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*api.Pod{{Spec: zone1Spec}},
nodes: labeledNodes,
expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10},
{"machine21", 10}, {"machine22", 10},
{"machine01", 0}, {"machine02", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
test: "no services",
},
{
@@ -574,9 +574,9 @@ func TestZoneSpreadPriority(t *testing.T) {
pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10},
{"machine21", 10}, {"machine22", 10},
{"machine01", 0}, {"machine02", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
test: "different services",
},
{
@@ -588,9 +588,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10},
{"machine21", 0}, {"machine22", 0},
{"machine01", 0}, {"machine02", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
{Host: "machine21", Score: 0}, {Host: "machine22", Score: 0},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
test: "three pods, one service pod",
},
{
@@ -602,9 +602,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{"machine11", 5}, {"machine12", 5},
{"machine21", 5}, {"machine22", 5},
{"machine01", 0}, {"machine02", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5},
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
test: "three pods, two service pods on different machines",
},
{
@@ -617,9 +617,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
expectedList: []schedulerapi.HostPriority{{"machine11", 0}, {"machine12", 0},
{"machine21", 10}, {"machine22", 10},
{"machine01", 0}, {"machine02", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0},
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
test: "three service label match pods in different namespaces",
},
{
@@ -632,9 +632,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{"machine11", 6}, {"machine12", 6},
{"machine21", 3}, {"machine22", 3},
{"machine01", 0}, {"machine02", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6},
{Host: "machine21", Score: 3}, {Host: "machine22", Score: 3},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
test: "four pods, three service pods",
},
{
@@ -646,9 +646,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []schedulerapi.HostPriority{{"machine11", 3}, {"machine12", 3},
{"machine21", 6}, {"machine22", 6},
{"machine01", 0}, {"machine02", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3},
{Host: "machine21", Score: 6}, {Host: "machine22", Score: 6},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
test: "service with partial pod label matches",
},
{
@@ -661,9 +661,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{"machine11", 7}, {"machine12", 7},
{"machine21", 5}, {"machine22", 5},
{"machine01", 0}, {"machine02", 0}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7},
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
test: "service pod on non-zoned node",
},
}
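Every hunk above makes the same mechanical change: go vet's composites check reports unkeyed composite literals of struct types imported from another package, because positional initialization silently changes meaning if the struct's fields are ever reordered. Below is a minimal, self-contained sketch of the pattern; the HostPriority stand-in is illustrative and defined locally only to keep the sketch compilable (the real check fires only when the type comes from a different package).

// composites_sketch.go (illustrative only; not part of this commit)
package main

import "fmt"

// Stand-in for an imported type such as schedulerapi.HostPriority.
// Note: vet's composites check only flags unkeyed literals of types
// imported from another package; it is defined locally here just to
// keep the sketch self-contained.
type HostPriority struct {
	Host  string
	Score int
}

func main() {
	// Unkeyed: for an imported type, vet reports something like
	// "composite literal uses unkeyed fields".
	bad := HostPriority{"machine11", 10}

	// Keyed: robust to field reordering, and the form this commit
	// converts every test fixture to.
	good := HostPriority{Host: "machine11", Score: 10}

	fmt.Println(bad, good)
}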

View File

@@ -170,7 +170,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
}{
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}},
prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
extenders: []FakeExtender{
{
predicates: []fitPredicate{truePredicateExtender},
@@ -185,7 +185,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}},
prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
extenders: []FakeExtender{
{
predicates: []fitPredicate{truePredicateExtender},
@@ -200,7 +200,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}},
prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
extenders: []FakeExtender{
{
predicates: []fitPredicate{truePredicateExtender},
@@ -215,7 +215,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}},
prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
extenders: []FakeExtender{
{
predicates: []fitPredicate{machine2PredicateExtender},
@@ -230,7 +230,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}},
prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
extenders: []FakeExtender{
{
predicates: []fitPredicate{truePredicateExtender},
@@ -244,7 +244,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{EqualPriority, 1}},
prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
extenders: []FakeExtender{
{
predicates: []fitPredicate{truePredicateExtender},
@@ -263,7 +263,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{machine2Prioritizer, 20}},
prioritizers: []algorithm.PriorityConfig{{Function: machine2Prioritizer, Weight: 20}},
extenders: []FakeExtender{
{
predicates: []fitPredicate{truePredicateExtender},

View File

@@ -114,14 +114,12 @@ func NewMasterComponents(c *Config) *MasterComponents {
if c.StartReplicationManager {
go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
}
var once sync.Once
return &MasterComponents{
ApiServer: s,
KubeMaster: m,
RestClient: restClient,
ControllerManager: controllerManager,
rcStopCh: rcStopCh,
once: once,
}
}
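The hunk above fixes a different class of vet error from the composites changes: sync.Once contains a sync.Mutex, so declaring a local once and copying it into the composite literal trips vet's copylocks check (the message is roughly "literal copies lock value from once: sync.Once contains sync.Mutex"). Deleting the local variable and the field assignment leaves the struct's zero-value sync.Once in place, which is ready to use and is never copied. A minimal sketch with illustrative names, not the real MasterComponents type:

// copylocks_sketch.go (illustrative only; not part of this commit)
package main

import "sync"

type components struct {
	once sync.Once
}

// newComponentsBad copies a sync.Once by value into the literal, which
// vet's copylocks check flags because Once contains a Mutex.
func newComponentsBad() *components {
	var once sync.Once
	return &components{once: once}
}

// newComponentsGood omits the field entirely; the zero value of
// sync.Once is valid and usable, so nothing needs to be copied.
func newComponentsGood() *components {
	return &components{}
}

func main() {
	_ = newComponentsBad()
	_ = newComponentsGood()
}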