mirror of https://github.com/k3s-io/k3s
Adding label checker predicates and test cases
parent 04db076e5f
commit 9dd7d2a0a1
@@ -167,6 +167,43 @@ func PodFitsHost(pod api.Pod, existingPods []api.Pod, node string) (bool, error)
 	return pod.Spec.Host == node, nil
 }
 
+type NodeLabelChecker struct {
+	info     NodeInfo
+	labels   []string
+	presence bool
+}
+
+func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) FitPredicate {
+	labelChecker := &NodeLabelChecker{
+		info:     info,
+		labels:   labels,
+		presence: presence,
+	}
+	return labelChecker.CheckNodeLabelPresence
+}
+
+// CheckNodeLabelPresence checks whether a particular label exists on a minion or not, regardless of its value.
+// Consider the cases where minions are placed in regions/zones/racks and these are identified by labels.
+// In some cases, it is required that only minions that are part of ANY of the defined regions/zones/racks be selected.
+//
+// Alternately, eliminating minions that have a certain label, regardless of value, is also useful.
+// A minion may have a label with "retiring" as key and the date as the value,
+// and it may be desirable to avoid scheduling new pods on this minion.
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
+	var exists bool
+	minion, err := n.info.GetNodeInfo(node)
+	if err != nil {
+		return false, err
+	}
+	for _, label := range n.labels {
+		exists = labels.Set(minion.Labels).Has(label)
+		if (exists && !n.presence) || (!exists && n.presence) {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
 func PodFitsPorts(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
 	existingPorts := getUsedPorts(existingPods...)
 	wantPorts := getUsedPorts(pod)
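The predicate's core rule is the boolean test inside the loop above: a minion fails as soon as a required label is missing (presence true) or a forbidden label is present (presence false). Below is a minimal standalone sketch of that rule, with a hypothetical checkLabels helper standing in for CheckNodeLabelPresence so it runs without any Kubernetes imports:

    package main

    import "fmt"

    // checkLabels mirrors the predicate rule above: when presence is true every
    // listed label must exist on the node; when presence is false none may exist.
    func checkLabels(nodeLabels map[string]string, required []string, presence bool) bool {
    	for _, label := range required {
    		_, exists := nodeLabels[label]
    		if (exists && !presence) || (!exists && presence) {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	node := map[string]string{"region": "us-east", "retiring": "2014-12-01"}
    	fmt.Println(checkLabels(node, []string{"region"}, true))    // true: required label exists
    	fmt.Println(checkLabels(node, []string{"zone"}, true))      // false: required label missing
    	fmt.Println(checkLabels(node, []string{"retiring"}, false)) // false: forbidden label present
    }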
@@ -386,3 +386,63 @@ func TestPodFitsSelector(t *testing.T) {
 		}
 	}
 }
+
+func TestNodeLabelPresence(t *testing.T) {
+	label := map[string]string{"foo": "bar", "bar": "foo"}
+	tests := []struct {
+		pod          api.Pod
+		existingPods []api.Pod
+		labels       []string
+		presence     bool
+		fits         bool
+		test         string
+	}{
+		{
+			labels:   []string{"baz"},
+			presence: true,
+			fits:     false,
+			test:     "label does not match, presence true",
+		},
+		{
+			labels:   []string{"baz"},
+			presence: false,
+			fits:     true,
+			test:     "label does not match, presence false",
+		},
+		{
+			labels:   []string{"foo", "baz"},
+			presence: true,
+			fits:     false,
+			test:     "one label matches, presence true",
+		},
+		{
+			labels:   []string{"foo", "baz"},
+			presence: false,
+			fits:     false,
+			test:     "one label matches, presence false",
+		},
+		{
+			labels:   []string{"foo", "bar"},
+			presence: true,
+			fits:     true,
+			test:     "all labels match, presence true",
+		},
+		{
+			labels:   []string{"foo", "bar"},
+			presence: false,
+			fits:     false,
+			test:     "all labels match, presence false",
+		},
+	}
+	for _, test := range tests {
+		node := api.Node{ObjectMeta: api.ObjectMeta{Labels: label}}
+		labelChecker := NodeLabelChecker{FakeNodeInfo(node), test.labels, test.presence}
+		fits, err := labelChecker.CheckNodeLabelPresence(test.pod, test.existingPods, "machine")
+		if err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+		if fits != test.fits {
+			t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
+		}
+	}
+}
@@ -18,6 +18,7 @@ package scheduler
 
 import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
 	"github.com/golang/glog"
 )
 
@@ -88,3 +89,47 @@ func LeastRequestedPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
 	}
 	return list, nil
 }
+
+type NodeLabelPrioritizer struct {
+	label    string
+	presence bool
+}
+
+func NewNodeLabelPriority(label string, presence bool) PriorityFunction {
+	labelPrioritizer := &NodeLabelPrioritizer{
+		label:    label,
+		presence: presence,
+	}
+	return labelPrioritizer.CalculateNodeLabelPriority
+}
+
+// CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.
+// Consider the cases where minions are placed in regions/zones/racks and these are identified by labels.
+// In some cases, it is required that only minions that are part of ANY of the defined regions/zones/racks be selected.
+func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+	var score int
+	minions, err := minionLister.List()
+	if err != nil {
+		return nil, err
+	}
+
+	// record, for each minion, whether it satisfies the label presence test
+	labeledMinions := map[string]bool{}
+	for _, minion := range minions.Items {
+		exists := labels.Set(minion.Labels).Has(n.label)
+		labeledMinions[minion.Name] = (exists && n.presence) || (!exists && !n.presence)
+	}
+
+	result := []HostPriority{}
+	// score is on a scale of 0-10, 0 being the lowest priority and 10 being the highest
+	for minionName, success := range labeledMinions {
+		if success {
+			score = 10
+		} else {
+			score = 0
+		}
+		result = append(result, HostPriority{host: minionName, score: score})
+	}
+	return result, nil
+}
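The scoring above is all-or-nothing: minions that satisfy the presence test score 10 and all others score 0. Here is a standalone sketch of those semantics with a hypothetical scoreByLabel stand-in; note that (exists && presence) || (!exists && !presence) reduces to exists == presence:

    package main

    import "fmt"

    // scoreByLabel gives each minion the maximum score when it satisfies the
    // presence test for the label, and zero otherwise (scale of 0-10).
    func scoreByLabel(minionLabels map[string]map[string]string, label string, presence bool) map[string]int {
    	scores := map[string]int{}
    	for name, labels := range minionLabels {
    		_, exists := labels[label]
    		if exists == presence {
    			scores[name] = 10
    		} else {
    			scores[name] = 0
    		}
    	}
    	return scores
    }

    func main() {
    	minions := map[string]map[string]string{
    		"machine1": {"foo": "bar"},
    		"machine2": {"bar": "foo"},
    	}
    	fmt.Println(scoreByLabel(minions, "foo", true)) // map[machine1:10 machine2:0]
    }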
@@ -18,6 +18,7 @@ package scheduler
 
 import (
 	"reflect"
+	"sort"
 	"testing"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@@ -238,3 +239,102 @@ func TestLeastRequested(t *testing.T) {
 		}
 	}
 }
+
+func TestNewNodeLabelPriority(t *testing.T) {
+	label1 := map[string]string{"foo": "bar"}
+	label2 := map[string]string{"bar": "foo"}
+	label3 := map[string]string{"bar": "baz"}
+	tests := []struct {
+		pod          api.Pod
+		pods         []api.Pod
+		nodes        []api.Node
+		label        string
+		presence     bool
+		expectedList HostPriorityList
+		test         string
+	}{
+		{
+			nodes: []api.Node{
+				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+			},
+			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
+			label:        "baz",
+			presence:     true,
+			test:         "no match found, presence true",
+		},
+		{
+			nodes: []api.Node{
+				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+			},
+			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
+			label:        "baz",
+			presence:     false,
+			test:         "no match found, presence false",
+		},
+		{
+			nodes: []api.Node{
+				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+			},
+			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
+			label:        "foo",
+			presence:     true,
+			test:         "one match found, presence true",
+		},
+		{
+			nodes: []api.Node{
+				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+			},
+			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
+			label:        "foo",
+			presence:     false,
+			test:         "one match found, presence false",
+		},
+		{
+			nodes: []api.Node{
+				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+			},
+			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
+			label:        "bar",
+			presence:     true,
+			test:         "two matches found, presence true",
+		},
+		{
+			nodes: []api.Node{
+				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+			},
+			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
+			label:        "bar",
+			presence:     false,
+			test:         "two matches found, presence false",
+		},
+	}
+
+	for _, test := range tests {
+		prioritizer := NodeLabelPrioritizer{
+			label:    test.label,
+			presence: test.presence,
+		}
+		list, err := prioritizer.CalculateNodeLabelPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(api.NodeList{Items: test.nodes}))
+		if err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+		// sort the two lists to avoid failures on account of different ordering
+		sort.Sort(test.expectedList)
+		sort.Sort(list)
+		if !reflect.DeepEqual(test.expectedList, list) {
+			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
+		}
+	}
+}
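The sort.Sort calls in the test only compile because HostPriorityList satisfies sort.Interface somewhere else in the package; that implementation is not part of this diff. Below is a hedged, self-contained sketch of what it presumably looks like, using local stand-in types (the score-then-host ordering is an assumption):

    package main

    import (
    	"fmt"
    	"sort"
    )

    // Local stand-ins for the scheduler's HostPriority/HostPriorityList types,
    // matching the {host, score} literals used in this diff.
    type hostPriority struct {
    	host  string
    	score int
    }

    type hostPriorityList []hostPriority

    func (h hostPriorityList) Len() int      { return len(h) }
    func (h hostPriorityList) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
    func (h hostPriorityList) Less(i, j int) bool {
    	if h[i].score == h[j].score {
    		return h[i].host < h[j].host
    	}
    	return h[i].score < h[j].score
    }

    func main() {
    	list := hostPriorityList{{"machine2", 10}, {"machine1", 10}, {"machine3", 0}}
    	sort.Sort(list)
    	fmt.Println(list) // [{machine3 0} {machine1 10} {machine2 10}]
    }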
@@ -80,25 +80,25 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
 	return result, nil
 }
 
-type ZoneSpread struct {
+type ServiceAntiAffinity struct {
 	serviceLister ServiceLister
-	zoneLabel     string
+	label         string
 }
 
-func NewZoneSpreadPriority(serviceLister ServiceLister, zoneLabel string) PriorityFunction {
-	zoneSpread := &ZoneSpread{
+func NewServiceAntiAffinityPriority(serviceLister ServiceLister, label string) PriorityFunction {
+	antiAffinity := &ServiceAntiAffinity{
 		serviceLister: serviceLister,
-		zoneLabel:     zoneLabel,
+		label:         label,
 	}
-	return zoneSpread.ZoneSpreadPriority
+	return antiAffinity.CalculateAntiAffinityPriority
 }
 
-func (z *ZoneSpread) ZoneSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
 	var service api.Service
 	var pods []api.Pod
 	var err error
 
-	service, err = z.serviceLister.GetPodService(pod)
+	service, err = s.serviceLister.GetPodService(pod)
 	if err == nil {
 		selector := labels.SelectorFromSet(service.Spec.Selector)
 		pods, err = podLister.ListPods(selector)
@@ -116,8 +116,8 @@ func (z *ZoneSpread) ZoneSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
 	openMinions := []string{}
 	zonedMinions := map[string]string{}
 	for _, minion := range minions.Items {
-		if labels.Set(minion.Labels).Has(z.zoneLabel) {
-			zone := labels.Set(minion.Labels).Get(z.zoneLabel)
+		if labels.Set(minion.Labels).Has(s.label) {
+			zone := labels.Set(minion.Labels).Get(s.label)
 			zonedMinions[minion.Name] = zone
 		} else {
 			openMinions = append(openMinions, minion.Name)
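Beyond the rename, replacing the zoneLabel field with a generic label means the same prioritizer can spread a service's pods across any label domain, not just zones. A hedged registration sketch with a hypothetical "rack" label, mirroring the "zone" registration in defaults.go below:

    // Hypothetical: spread pods of a service across racks instead of zones.
    factory.RegisterPriorityFunction("RackSpreadingPriority",
    	algorithm.NewServiceAntiAffinityPriority(factory.ServiceLister, "rack"), 1)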
@@ -270,8 +270,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		zoneSpread := ZoneSpread{serviceLister: FakeServiceLister(test.services), zoneLabel: "zone"}
-		list, err := zoneSpread.ZoneSpreadPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeLabeledMinionList(test.nodes)))
+		zoneSpread := ServiceAntiAffinity{serviceLister: FakeServiceLister(test.services), label: "zone"}
+		list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeLabeledMinionList(test.nodes)))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -50,7 +50,7 @@ func defaultPriorities() util.StringSet {
 		factory.RegisterPriorityFunction("ServiceSpreadingPriority", algorithm.NewServiceSpreadPriority(factory.ServiceLister), 1),
 		// spreads pods belonging to the same service across minions in different zones
 		// TODO: remove the hardcoding of the "zone" label and move it to a constant
-		factory.RegisterPriorityFunction("ZoneSpreadingPriority", algorithm.NewZoneSpreadPriority(factory.ServiceLister, "zone"), 1),
+		factory.RegisterPriorityFunction("ZoneSpreadingPriority", algorithm.NewServiceAntiAffinityPriority(factory.ServiceLister, "zone"), 1),
 		// EqualPriority is a prioritizer function that gives an equal weight of one to all minions
 		factory.RegisterPriorityFunction("EqualPriority", algorithm.EqualPriority, 0),
 	)
@@ -0,0 +1,44 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This is the label checker algorithm provider for the scheduler.
+package labelchecker
+
+import (
+	algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory"
+)
+
+const Provider string = "LabelCheckerProvider"
+
+func init() {
+	factory.RegisterAlgorithmProvider(Provider, defaultPredicates(), defaultPriorities())
+}
+
+func defaultPredicates() util.StringSet {
+	return util.NewStringSet(
+		// Fit is defined based on the presence/absence of a label on a minion, regardless of value.
+		factory.RegisterFitPredicate("NodeLabelPredicate", algorithm.NewNodeLabelPredicate(factory.MinionLister, []string{"region"}, true)),
+	)
+}
+
+func defaultPriorities() util.StringSet {
+	return util.NewStringSet(
+		// Prioritize nodes based on the presence/absence of a label on a minion, regardless of value.
+		factory.RegisterPriorityFunction("NodeLabelPriority", algorithm.NewNodeLabelPriority("", true), 1),
+	)
+}
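The new file follows the plugin pattern used throughout the scheduler: init() publishes a named set of predicates and priorities into a package-level registry, and the scheduler later looks the provider up by name. A minimal standalone sketch of that registration pattern; the registry type and function names here are hypothetical simplifications of the factory package:

    package main

    import "fmt"

    // provider bundles named predicate and priority sets, standing in for the
    // factory package's registered algorithm providers.
    type provider struct {
    	predicates map[string]bool
    	priorities map[string]bool
    }

    var providers = map[string]provider{}

    // registerAlgorithmProvider publishes a provider under a name, as init()
    // does above via factory.RegisterAlgorithmProvider.
    func registerAlgorithmProvider(name string, predicates, priorities map[string]bool) {
    	providers[name] = provider{predicates: predicates, priorities: priorities}
    }

    func main() {
    	registerAlgorithmProvider("LabelCheckerProvider",
    		map[string]bool{"NodeLabelPredicate": true},
    		map[string]bool{"NodeLabelPriority": true})

    	p := providers["LabelCheckerProvider"]
    	fmt.Println(p.predicates, p.priorities)
    }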