remove rescheduler

pull/8/head
liangwei 2018-08-21 21:31:27 +08:00 committed by Lion-Wei
parent d017bebf6b
commit 5ea138f4e9
17 changed files with 7 additions and 239 deletions

View File

@@ -388,7 +388,6 @@ function kube::release::package_kube_manifests_tarball() {
cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}"
cp "${src_dir}/kube-addon-manager.yaml" "${dst_dir}"
cp "${src_dir}/glbc.manifest" "${dst_dir}"
cp "${src_dir}/rescheduler.manifest" "${dst_dir}/"
cp "${src_dir}/e2e-image-puller.manifest" "${dst_dir}/"
cp "${src_dir}/etcd-empty-dir-cleanup.yaml" "${dst_dir}/"
local internal_manifest

View File

@@ -273,21 +273,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
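The fluentd source removed above tails /var/log/rescheduler.log and parses klog-style lines with the format1 regex. As a rough, illustrative sketch (not part of this commit), the Go snippet below applies an equivalent pattern to the example line quoted in the config comment; Go's regexp uses (?P<name>...) named groups where fluentd's Ruby regex uses (?<name>...).

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Equivalent of the fluentd format1 pattern, with Go-style named groups.
	re := regexp.MustCompile(`^(?P<severity>\w)(?P<time>\d{4} [^\s]*)\s+(?P<pid>\d+)\s+(?P<source>[^ \]]+)\] (?P<message>.*)`)

	// Example line from the config comment above.
	line := "I1104 10:36:20.242766       5 rescheduler.go:73] Running Rescheduler"

	m := re.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	for i, name := range re.SubexpNames() {
		if i == 0 || name == "" {
			continue
		}
		fmt.Printf("%s=%q\n", name, m[i])
	}
	// Prints the severity, time, pid, source (rescheduler.go:73) and message fields.
}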

View File

@@ -210,20 +210,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/gcp-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>

View File

@@ -225,20 +225,6 @@ data:
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/gcp-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>

View File

@@ -287,9 +287,6 @@ if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
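The removed ENABLE_RESCHEDULER line relies on bash's ${VAR:-default} expansion: the value of KUBE_ENABLE_RESCHEDULER is used if it is set and non-empty, otherwise the rescheduler defaulted to enabled. A minimal Go sketch of the same read-env-or-default idiom, purely for illustration (not part of the cluster scripts):

package main

import (
	"fmt"
	"os"
)

// envOrDefault mirrors bash's "${VAR:-default}": an unset or empty
// environment variable falls back to the given default.
func envOrDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	enableRescheduler := envOrDefault("KUBE_ENABLE_RESCHEDULER", "true")
	fmt.Println("ENABLE_RESCHEDULER =", enableRescheduler)
}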

View File

@@ -294,9 +294,6 @@ if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.

View File

@@ -2543,16 +2543,6 @@ function start-lb-controller {
fi
}
# Starts rescheduler.
function start-rescheduler {
if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]]; then
echo "Start Rescheduler"
prepare-log-file /var/log/rescheduler.log
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
/etc/kubernetes/manifests/
fi
}
# Setup working directory for kubelet.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
@@ -2734,7 +2724,6 @@ function main() {
start-kube-addons
start-cluster-autoscaler
start-lb-controller
start-rescheduler
else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy

View File

@@ -25,7 +25,6 @@ filegroup(
"kube-controller-manager.manifest",
"kube-proxy.manifest",
"kube-scheduler.manifest",
"rescheduler.manifest",
] + glob(["internal-*"]),
)

View File

@@ -6,8 +6,7 @@ metadata:
# This annotation ensures that kube-proxy does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that kube-proxy runs as a static pod so this annotation does NOT have
# any effect on rescheduler (default scheduler and rescheduler are not
# involved in scheduling kube-proxy).
# any effect on the default scheduler (it is not involved in scheduling kube-proxy).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:

View File

@@ -1,36 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: rescheduler-v0.4.0
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: rescheduler
version: v0.4.0
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Rescheduler"
spec:
hostNetwork: true
containers:
- image: k8s.gcr.io/rescheduler:v0.4.0
name: rescheduler
volumeMounts:
- mountPath: /var/log/rescheduler.log
name: logfile
readOnly: false
# TODO: Make resource requirements depend on the size of the cluster
resources:
requests:
cpu: 5m
memory: 100Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- 'exec /rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1'
volumes:
- hostPath:
path: /var/log/rescheduler.log
type: FileOrCreate
name: logfile

View File

@@ -844,7 +844,6 @@ ENABLE_NODE_PROBLEM_DETECTOR: $(yaml-quote ${ENABLE_NODE_PROBLEM_DETECTOR:-none}
NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-})
NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
ENABLE_RESCHEDULER: $(yaml-quote ${ENABLE_RESCHEDULER:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})

View File

@@ -41,7 +41,7 @@ readonly master_ssh_supported_providers="gce aws kubernetes-anywhere"
readonly node_ssh_supported_providers="gce gke aws kubernetes-anywhere"
readonly gcloud_supported_providers="gce gke kubernetes-anywhere"
readonly master_logfiles="kube-apiserver kube-apiserver-audit kube-scheduler rescheduler kube-controller-manager etcd etcd-events glbc cluster-autoscaler kube-addon-manager fluentd"
readonly master_logfiles="kube-apiserver kube-apiserver-audit kube-scheduler kube-controller-manager etcd etcd-events glbc cluster-autoscaler kube-addon-manager fluentd"
readonly node_logfiles="kube-proxy fluentd node-problem-detector"
readonly node_systemd_services="node-problem-detector"
readonly hollow_node_logfiles="kubelet-hollow-node-* kubeproxy-hollow-node-* npd-hollow-node-*"

View File

@@ -143,7 +143,7 @@ func (sp SyncPodType) String() string {
}
// IsCriticalPod returns true if the pod bears the critical pod annotation key or if pod's priority is greater than
// or equal to SystemCriticalPriority. Both the rescheduler(deprecated in 1.10) and the kubelet use this function
// or equal to SystemCriticalPriority. Both the default scheduler and the kubelet use this function
// to make admission and scheduling decisions.
func IsCriticalPod(pod *v1.Pod) bool {
if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
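For context, the rule the comment describes can be sketched as two checks: the pod carries the scheduler.alpha.kubernetes.io/critical-pod annotation (the same annotation seen in the manifests above), or its priority is at or above the system-critical threshold. The snippet below is a simplified, self-contained illustration with stand-in types and an assumed threshold value, not the real k8s.io/api types or the exact upstream function body:

package main

import "fmt"

// Stand-in for the relevant fields of a real Pod object (illustrative only).
type Pod struct {
	Annotations map[string]string
	Priority    *int32
}

const (
	criticalPodAnnotation  = "scheduler.alpha.kubernetes.io/critical-pod"
	systemCriticalPriority = int32(2000000000) // assumed threshold, not defined in this commit
)

// isCriticalPod sketches the check from the comment: annotation present,
// or priority greater than or equal to the system-critical threshold.
func isCriticalPod(pod *Pod) bool {
	if _, ok := pod.Annotations[criticalPodAnnotation]; ok {
		return true
	}
	return pod.Priority != nil && *pod.Priority >= systemCriticalPriority
}

func main() {
	p := &Pod{Annotations: map[string]string{criticalPodAnnotation: ""}}
	fmt.Println(isCriticalPod(p)) // true
}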

View File

@@ -11,7 +11,6 @@ go_library(
"predicates.go",
"preemption.go",
"priorities.go",
"rescheduler.go",
"resource_quota.go",
"ubernetes_lite.go",
"ubernetes_lite_volumes.go",
@@ -19,7 +18,6 @@
importpath = "k8s.io/kubernetes/test/e2e/scheduling",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/scheduling:go_default_library",

View File

@@ -35,6 +35,10 @@ import (
_ "github.com/stretchr/testify/assert"
)
const (
defaultTimeout = 3 * time.Minute
)
var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList

View File

@@ -1,133 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
defaultTimeout = 3 * time.Minute
)
// This test requires Rescheduler to be enabled.
var _ = SIGDescribe("Rescheduler [Serial]", func() {
f := framework.NewDefaultFramework("rescheduler")
var ns string
var totalMillicores int
BeforeEach(func() {
framework.Skipf("Rescheduler is being deprecated soon in Kubernetes 1.10")
ns = f.Namespace.Name
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount := len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
totalMillicores = int((&cpu).MilliValue()) * nodeCount
})
It("should ensure that critical pod is scheduled in case there is no resources available", func() {
By("reserving all available cpu")
err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "reserve-all-cpu")
framework.ExpectNoError(err)
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(listOpts)
framework.ExpectNoError(err)
Expect(len(deployments.Items)).Should(Equal(1))
deployment := deployments.Items[0]
replicas := uint(*(deployment.Spec.Replicas))
err = framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
framework.ExpectNoError(err)
})
})
func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
timeout := 5 * time.Minute
replicas := millicores / 100
reserveCpu(f, id, 1, 100)
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
if err != nil {
return err
}
if len(pods) != replicas {
continue
}
allRunningOrUnschedulable := true
for _, pod := range pods {
if !podRunningOrUnschedulable(pod) {
allRunningOrUnschedulable = false
break
}
}
if allRunningOrUnschedulable {
return nil
}
}
return fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", id, timeout, replicas)
}
func podRunningOrUnschedulable(pod *v1.Pod) bool {
_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
if cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true
}
running, _ := testutils.PodRunningReady(pod)
return running
}
func reserveCpu(f *framework.Framework, id string, replicas, millicores int) {
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
request := int64(millicores / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
CpuRequest: request,
}
framework.ExpectNoError(framework.RunRC(*config))
}

View File

@@ -22,7 +22,6 @@
"Redis",
"ReplicaSet",
"ReplicationController",
"Rescheduler",
"RethinkDB",
"Secret",
"Security",