mirror of https://github.com/k3s-io/k3s
Separate deletion and termination evictors in NodeController, and fix rate_limited_queue.go
parent 5d07236be6
commit a3723e2045
@@ -202,7 +202,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 	// TODO: Write an integration test for the replication controllers watch.
 	go controllerManager.Run(3, util.NeverStop)
 
-	nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, util.NewFakeRateLimiter(),
+	nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),
 		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
 	nodeController.Run(5 * time.Second)
 	cadvisorInterface := new(cadvisor.Fake)
@@ -259,6 +259,7 @@ func (s *CMServer) Run(_ []string) error {
 
 	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
 		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
+		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
 	nodeController.Run(s.NodeSyncPeriod)
 
@@ -128,6 +128,7 @@ func (s *CMServer) Run(_ []string) error {
 
 	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
 		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
+		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
 	nodeController.Run(s.NodeSyncPeriod)
 
@@ -105,7 +105,8 @@ func NewNodeController(
 	cloud cloudprovider.Interface,
 	kubeClient client.Interface,
 	podEvictionTimeout time.Duration,
-	podEvictionLimiter util.RateLimiter,
+	deletionEvictionLimiter util.RateLimiter,
+	terminationEvictionLimiter util.RateLimiter,
 	nodeMonitorGracePeriod time.Duration,
 	nodeStartupGracePeriod time.Duration,
 	nodeMonitorPeriod time.Duration,
@@ -132,8 +133,8 @@ func NewNodeController(
 		podEvictionTimeout:     podEvictionTimeout,
 		maximumGracePeriod:     5 * time.Minute,
 		evictorLock:            &evictorLock,
-		podEvictor:             NewRateLimitedTimedQueue(podEvictionLimiter),
-		terminationEvictor:     NewRateLimitedTimedQueue(podEvictionLimiter),
+		podEvictor:             NewRateLimitedTimedQueue(deletionEvictionLimiter),
+		terminationEvictor:     NewRateLimitedTimedQueue(terminationEvictionLimiter),
 		nodeStatusMap:          make(map[string]nodeStatusData),
 		nodeMonitorGracePeriod: nodeMonitorGracePeriod,
 		nodeMonitorPeriod:      nodeMonitorPeriod,
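Note: the constructor now takes two independent limiters, one feeding the queue that marks pods for deletion and one feeding the queue that forces termination. The sketch below is a minimal, self-contained illustration of that idea using toy types; the limiter and evictor types here are illustrative stand-ins, not the Kubernetes util.RateLimiter or RateLimitedTimedQueue implementations, and the rates are made up.

package main

import (
	"fmt"
	"time"
)

// limiter is a toy stand-in for util.RateLimiter: it admits one action per interval.
type limiter struct {
	interval time.Duration
	next     time.Time
}

func (l *limiter) CanAccept() bool {
	now := time.Now()
	if now.Before(l.next) {
		return false
	}
	l.next = now.Add(l.interval)
	return true
}

// evictor pairs a FIFO of node names with its own limiter, so throttling one
// phase does not consume the other phase's budget.
type evictor struct {
	limiter limiter
	nodes   []string
}

// tryOne handles at most one queued node per call, respecting the limiter.
func (e *evictor) tryOne(action string) {
	if len(e.nodes) == 0 || !e.limiter.CanAccept() {
		return
	}
	node := e.nodes[0]
	e.nodes = e.nodes[1:]
	fmt.Printf("%s pods on %s\n", action, node)
}

func main() {
	// Two separate limiters, mirroring deletionEvictionLimiter and
	// terminationEvictionLimiter in the signature above (hypothetical rates).
	deletion := &evictor{limiter: limiter{interval: 100 * time.Millisecond}, nodes: []string{"node0", "node1"}}
	termination := &evictor{limiter: limiter{interval: time.Second}, nodes: []string{"node0"}}

	deletion.tryOne("marking for deletion of")
	termination.tryOne("terminating")
}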
@@ -601,8 +602,8 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
 			continue
 		}
 
-		glog.V(2).Infof("Delete pod %v", pod.Name)
-		nc.recorder.Eventf(&pod, "NodeControllerEviction", "Deleting Pod %s from Node %s", pod.Name, nodeName)
+		glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
+		nc.recorder.Eventf(&pod, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
 		if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
 			return false, err
 		}
@@ -325,7 +325,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
 
 	for _, item := range table {
 		nodeController := NewNodeController(nil, item.fakeNodeHandler,
-			evictionTimeout, util.NewFakeRateLimiter(), testNodeMonitorGracePeriod,
+			evictionTimeout, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(), testNodeMonitorGracePeriod,
 			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
 		nodeController.now = func() unversioned.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
@@ -543,7 +543,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
 
 	for _, item := range table {
 		nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, util.NewFakeRateLimiter(),
-			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
 		nodeController.now = func() unversioned.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -621,7 +621,7 @@ func TestNodeDeletion(t *testing.T) {
 		Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node1")}}),
 	}
 
-	nodeController := NewNodeController(nil, fakeNodeHandler, 5*time.Minute, util.NewFakeRateLimiter(),
+	nodeController := NewNodeController(nil, fakeNodeHandler, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),
 		testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
 	nodeController.now = func() unversioned.Time { return fakeNow }
 	if err := nodeController.monitorNodeStatus(); err != nil {
@@ -21,6 +21,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -163,16 +164,14 @@ func (q *RateLimitedTimedQueue) Try(fn ActionFunc) {
 	for ok {
 		// rate limit the queue checking
 		if !q.limiter.CanAccept() {
+			glog.V(10).Info("Try rate limitted...")
 			// Try again later
 			break
 		}
 
 		now := now()
 		if now.Before(val.ProcessAt) {
-			q.queue.Replace(val)
-			val, ok = q.queue.Head()
-			// we do not sleep here because other values may be added at the front of the queue
-			continue
+			break
 		}
 
 		if ok, wait := fn(val); !ok {
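Note: a minimal, self-contained sketch of the Try behaviour after this fix, using toy types rather than the real RateLimitedTimedQueue. The point mirrored here is that when the head of the queue is not yet due, the loop stops instead of rotating the head and continuing (the removed Replace/Head/continue lines). Requeueing a declined item at the back with a later due time is an assumption chosen to match the updated TestTryOrdering expectations below.

package main

import (
	"fmt"
	"time"
)

// timedValue is a toy version of a queue entry: a value plus the time at
// which it becomes eligible for processing.
type timedValue struct {
	value     string
	processAt time.Time
}

// tryAll drains due items from the front of a FIFO, stopping when the limiter
// says no or when the head is not yet due. fn returns (done, wait); an item
// that is not done is requeued at the back with its processAt pushed out.
func tryAll(queue []timedValue, canAccept func() bool, fn func(timedValue) (bool, time.Duration)) []timedValue {
	for len(queue) > 0 {
		if !canAccept() {
			break // rate limited; try again on a later Try
		}
		head := queue[0]
		if time.Now().Before(head.processAt) {
			break // head not due yet; a far-future head no longer makes the loop spin
		}
		queue = queue[1:]
		if done, wait := fn(head); !done {
			head.processAt = time.Now().Add(wait)
			queue = append(queue, head)
		}
	}
	return queue
}

func main() {
	now := time.Now()
	queue := []timedValue{
		{value: "first", processAt: now},
		{value: "second", processAt: now},
		{value: "third", processAt: now},
	}
	var order []string
	queue = tryAll(queue, func() bool { return true }, func(v timedValue) (bool, time.Duration) {
		if v.value == "second" {
			return false, time.Hour // decline "second" so it is requeued for later
		}
		order = append(order, v.value)
		return true, 0
	})
	fmt.Println(order)     // [first third]
	fmt.Println(len(queue)) // 1: "second" is parked at the head until it is due
}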
@@ -175,10 +175,10 @@ func TestTryOrdering(t *testing.T) {
 		order = append(order, value.Value)
 		return true, 0
 	})
-	if !reflect.DeepEqual(order, []string{"first", "third", "second"}) {
+	if !reflect.DeepEqual(order, []string{"first", "third"}) {
 		t.Fatalf("order was wrong: %v", order)
 	}
-	if count != 4 {
+	if count != 3 {
 		t.Fatalf("unexpected iterations: %d", count)
 	}
 }