plumb --enable-garbage-collector from an environment variable;

add a simple e2e test
Chao Xu 2016-06-06 17:23:36 -07:00
parent 8c8c5d97ca
commit 7a5b3c43a0
10 changed files with 223 additions and 12 deletions


@@ -640,6 +640,7 @@ ENABLE_MANIFEST_URL: $(yaml-quote ${ENABLE_MANIFEST_URL:-false})
MANIFEST_URL: $(yaml-quote ${MANIFEST_URL:-})
MANIFEST_URL_HEADER: $(yaml-quote ${MANIFEST_URL_HEADER:-})
NUM_NODES: $(yaml-quote ${NUM_NODES})
ENABLE_GARBAGE_COLLECTOR: $(yaml-quote ${ENABLE_GARBAGE_COLLECTOR:-false})
EOF
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
cat >>$file <<EOF
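
The ${ENABLE_GARBAGE_COLLECTOR:-false} expansion writes false into the generated kube-env whenever the variable is unset, so the collector stays off by default. A minimal Go sketch of the same default-if-unset lookup (envOrDefault is a hypothetical helper, not part of this change):

package main

import (
    "fmt"
    "os"
)

// envOrDefault mirrors the shell's ${VAR:-default} expansion used in the
// kube-env hunk above: ${VAR:-default} substitutes default when VAR is
// unset or empty, which Getenv's empty-string result models in both cases.
func envOrDefault(key, def string) string {
    if v := os.Getenv(key); v != "" {
        return v
    }
    return def
}

func main() {
    // With the variable unset this prints "false", matching
    // ${ENABLE_GARBAGE_COLLECTOR:-false}.
    fmt.Println(envOrDefault("ENABLE_GARBAGE_COLLECTOR", "false"))
}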


@@ -585,6 +585,7 @@ function start-kube-apiserver {
params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert"
params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key"
params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
@@ -663,6 +664,7 @@ function start-kube-controller-manager {
params+=" --master=127.0.0.1:8080"
params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt"
params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key"
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
params+=" --cluster-name=${INSTANCE_PREFIX}"
fi
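
Both start-kube-apiserver and start-kube-controller-manager append the same boolean flag to their params strings. On the receiving side each binary parses it at startup; a sketch using the standard flag package (the real components wire the flag through pflag into an options struct, so the code below is illustrative only):

package main

import (
    "flag"
    "fmt"
)

func main() {
    // Roughly how --enable-garbage-collector reaches a component; the
    // default of false matches the kube-env default above.
    enableGC := flag.Bool("enable-garbage-collector", false,
        "Enables the generic garbage collector.")
    flag.Parse()
    fmt.Println("garbage collector enabled:", *enableGC)
}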


@@ -59,7 +59,6 @@ function create-node-instance-template {
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
#
# variables are set:
# ensure-temp-dir
# detect-project


@@ -122,7 +122,12 @@
{% set log_level = pillar['api_server_test_log_level'] -%}
{% endif -%}
{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout -%}
{% set enable_garbage_collector = "" -%}
{% if pillar['enable_garbage_collector'] is defined -%}
{% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
{% endif -%}
{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
{% set params = params + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + token_auth_file + " " + bind_address + " " + log_level + " " + advertise_address + " " + proxy_ssh_options + authz_mode + abac_policy_file + webhook_authentication_config + webhook_authorization_config -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
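
The guard on pillar['enable_garbage_collector'] appends the flag only when the pillar key is defined, so existing Salt configurations render an unchanged command line; the controller-manager manifest below repeats the same pattern. The same conditional append expressed in Go, with the pillar modeled as a map (illustrative only):

package main

import "fmt"

func main() {
    pillar := map[string]string{"enable_garbage_collector": "true"}

    params := "--secure-port=443" // stand-in for the long params line
    // Mirror the Jinja guard: append the flag only if the key is defined.
    if v, ok := pillar["enable_garbage_collector"]; ok {
        params += " --enable-garbage-collector=" + v
    }
    fmt.Println(params)
}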


@@ -27,6 +27,11 @@
{% set terminated_pod_gc = "--terminated-pod-gc-threshold=" + pillar['terminated_pod_gc_threshold'] -%}
{% endif -%}
{% set enable_garbage_collector = "" -%}
{% if pillar['enable_garbage_collector'] is defined -%}
{% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
{% endif -%}
{% set cloud_provider = "" -%}
{% set cloud_config = "" -%}
{% set cloud_config_mount = "" -%}
@@ -68,7 +73,7 @@
{% set log_level = pillar['controller_manager_test_log_level'] -%}
{% endif -%}
{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration


@@ -39,8 +39,10 @@ cluster/photon-controller/util.sh: node_name=${1}
cluster/rackspace/util.sh: local node_ip=$(nova show --minimal ${NODE_NAMES[$i]} \
cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest:{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config -%}
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set enable_garbage_collector = pillar['enable_garbage_collector'] -%}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set enable_garbage_collector = pillar['enable_garbage_collector'] -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}


@@ -684,6 +684,7 @@ func (gc *GarbageCollector) processItem(item *node) error {
}

func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
    glog.V(0).Infof("GarbageCollector is starting")
    for _, monitor := range gc.monitors {
        go monitor.controller.Run(stopCh)
    }
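
Run fans out one goroutine per monitor, each running an informer-style controller for one resource type, and the shared stop channel gives them a common shutdown signal. A self-contained sketch of that fan-out-and-stop pattern (monitor below is a stand-in, not the real type):

package main

import (
    "fmt"
    "sync"
    "time"
)

type monitor struct{ resource string }

// run blocks until stopCh is closed, standing in for controller.Run above.
func (m monitor) run(stopCh <-chan struct{}) {
    for {
        select {
        case <-stopCh:
            return
        case <-time.After(100 * time.Millisecond):
            fmt.Println("watching", m.resource)
        }
    }
}

func main() {
    monitors := []monitor{{"pods"}, {"replicationcontrollers"}}
    stopCh := make(chan struct{})

    var wg sync.WaitGroup
    for _, m := range monitors {
        wg.Add(1)
        go func(m monitor) { // one goroutine per monitor, as in Run
            defer wg.Done()
            m.run(stopCh)
        }(m)
    }

    time.Sleep(300 * time.Millisecond)
    close(stopCh) // broadcast shutdown to every monitor
    wg.Wait()
}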


@@ -0,0 +1,197 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
    "fmt"
    "time"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
    "k8s.io/kubernetes/pkg/util/wait"
    "k8s.io/kubernetes/test/e2e/framework"

    . "github.com/onsi/ginkgo"
)

func getOrphanOptions() *api.DeleteOptions {
    trueVar := true
    return &api.DeleteOptions{OrphanDependents: &trueVar}
}

func getNonOrphanOptions() *api.DeleteOptions {
    falseVar := false
    return &api.DeleteOptions{OrphanDependents: &falseVar}
}
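
The two helpers differ only in the boolean they point at: a nil OrphanDependents defers to the server's default, true asks the server to orphan the dependents, and false asks the garbage collector to delete them. A self-contained sketch of the three states (deleteOptions is a local mirror of the one field, not the real api.DeleteOptions):

package main

import "fmt"

type deleteOptions struct {
    OrphanDependents *bool
}

// describe spells out what each state of the pointer requests.
func describe(opts *deleteOptions) string {
    if opts == nil || opts.OrphanDependents == nil {
        return "server default"
    }
    if *opts.OrphanDependents {
        return "orphan the dependents"
    }
    return "cascade: the garbage collector deletes the dependents"
}

func main() {
    t, f := true, false
    fmt.Println(describe(nil))                                  // server default
    fmt.Println(describe(&deleteOptions{OrphanDependents: &t})) // orphan
    fmt.Println(describe(&deleteOptions{OrphanDependents: &f})) // cascade
}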
func newOwnerRC(f *framework.Framework, name string) *v1.ReplicationController {
    replicas := int32(2)
    return &v1.ReplicationController{
        TypeMeta: unversioned.TypeMeta{
            Kind:       "ReplicationController",
            APIVersion: "v1",
        },
        ObjectMeta: v1.ObjectMeta{
            Namespace: f.Namespace.Name,
            Name:      name,
        },
        Spec: v1.ReplicationControllerSpec{
            Replicas: &replicas,
            Selector: map[string]string{"app": "gc-test"},
            Template: &v1.PodTemplateSpec{
                ObjectMeta: v1.ObjectMeta{
                    Labels: map[string]string{"app": "gc-test"},
                },
                Spec: v1.PodSpec{
                    Containers: []v1.Container{
                        {
                            Name:  "nginx",
                            Image: "gcr.io/google_containers/nginx:1.7.9",
                        },
                    },
                },
            },
        },
    }
}
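
The pods this RC creates become its dependents because the RC manager records an owner reference in each pod's metadata, and that reference is what the garbage collector walks when the RC is deleted. A sketch of the reference's shape (ownerRef is a local illustration of the fields, not the real v1.OwnerReference):

package main

import "fmt"

type ownerRef struct {
    APIVersion string
    Kind       string
    Name       string
    UID        string
}

func main() {
    // Hypothetical reference a pod of simpletest.rc would carry.
    ref := ownerRef{
        APIVersion: "v1",
        Kind:       "ReplicationController",
        Name:       "simpletest.rc",
        UID:        "1234-abcd", // hypothetical UID
    }
    fmt.Printf("pod is a dependent of %s %q (uid %s)\n", ref.Kind, ref.Name, ref.UID)
}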
// verifyRemainingObjects verifies whether the numbers of remaining replication
// controllers and pods are rcNum and podNum, respectively. It returns an error
// if communication with the API server fails.
func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
    rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
    pods, err := clientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{})
    if err != nil {
        return false, fmt.Errorf("Failed to list pods: %v", err)
    }
    ret := true
    if len(pods.Items) != podNum {
        ret = false
        By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
    }
    rcs, err := rcClient.List(api.ListOptions{})
    if err != nil {
        return false, fmt.Errorf("Failed to list replication controllers: %v", err)
    }
    if len(rcs.Items) != rcNum {
        ret = false
        By(fmt.Sprintf("expected %d RCs, got %d RCs", rcNum, len(rcs.Items)))
    }
    return ret, nil
}
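
verifyRemainingObjects returns (bool, error) so it can be handed directly to wait.Poll, which retries a condition on a fixed interval until it reports done, returns an error, or the timeout elapses. A minimal stand-in for that contract (the real wait.Poll returns wait.ErrWaitTimeout on expiry, which the orphaning test below checks for):

package main

import (
    "errors"
    "fmt"
    "time"
)

// pollUntil retries cond every interval until it reports done, fails, or
// the timeout elapses; this is the contract the tests below rely on.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        done, err := cond()
        if err != nil {
            return err
        }
        if done {
            return nil
        }
        time.Sleep(interval)
    }
    return errors.New("timed out waiting for the condition")
}

func main() {
    calls := 0
    err := pollUntil(10*time.Millisecond, time.Second, func() (bool, error) {
        calls++
        return calls >= 3, nil // succeed on the third attempt
    })
    fmt.Println(err, "after", calls, "calls")
}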
var _ = framework.KubeDescribe("Garbage collector", func() {
f := framework.NewDefaultFramework("gc")
It("[Feature:GarbageCollector] should delete pods created by rc when not orphaning", func() {
clientSet := f.Clientset_1_3
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
rc := newOwnerRC(f, rcName)
By("create the rc")
rc, err := rcClient.Create(rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(api.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
// We intentionally don't wait the number of pods to reach
// rc.Spec.Replicas. We want to see if the garbage collector and the
// rc manager work properly if the rc is deleted before it reaches
// stasis.
if len(pods.Items) > 0 {
return true, nil
} else {
return false, nil
}
}); err != nil {
framework.Failf("failed to wait for the rc to create some pods: %v", err)
}
By("delete the rc")
deleteOptions := getNonOrphanOptions()
deleteOptions.Preconditions = api.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(f, clientSet, 0, 0)
}); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(api.ListOptions{})
if err != nil {
framework.Failf("failed to list pods post mortem: %v", err)
} else {
framework.Failf("remaining pods are: %#v", remainingPods)
}
}
})
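
The delete above attaches api.NewUIDPreconditions(string(rc.UID)), and the orphaning test below does the same, so the server is expected to refuse the request if the object named simpletest.rc is no longer the exact one the test created. A sketch of the check such a precondition implies (illustrative only; the real enforcement happens server-side):

package main

import "fmt"

// deleteIfUIDMatches sketches the precondition: delete the stored object
// only when its UID still matches the one the client last saw.
func deleteIfUIDMatches(storedUID, expectedUID string) error {
    if storedUID != expectedUID {
        return fmt.Errorf("precondition failed: UID %q does not match %q", storedUID, expectedUID)
    }
    // ... proceed with deletion ...
    return nil
}

func main() {
    fmt.Println(deleteIfUIDMatches("abc-123", "abc-123")) // <nil>
    fmt.Println(deleteIfUIDMatches("def-456", "abc-123")) // precondition failed
}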
It("[Feature:GarbageCollector] should orphan pods created by rc", func() {
clientSet := f.Clientset_1_3
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
rc := newOwnerRC(f, rcName)
By("create the rc")
rc, err := rcClient.Create(rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc, err := rcClient.Get(rc.Name)
if err != nil {
return false, fmt.Errorf("Failed to get rc: %v", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
} else {
return false, nil
}
}); err != nil {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
}
By("delete the rc")
deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = api.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(api.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
return false, fmt.Errorf("expect %d pods, got %d pods", e, a)
}
return false, nil
}); err != nil && err != wait.ErrWaitTimeout {
framework.Failf("%v", err)
}
})
})


@@ -17,7 +17,6 @@ limitations under the License.
package e2e

import (
    "fmt"
    "strconv"
    "time"
@@ -71,7 +70,7 @@ func observePodCreation(w watch.Interface) {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation")
framework.Failf("Timeout while waiting for pod creation")
}
}
@@ -91,7 +90,7 @@ func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
}
}
if !deleted {
Fail("Failed to observe pod deletion")
framework.Failf("Failed to observe pod deletion")
}
return
}
@@ -164,7 +163,7 @@ var _ = framework.KubeDescribe("Generated release_1_2 clientset", func() {
options = api.ListOptions{LabelSelector: selector}
pods, err = podClient.List(options)
if err != nil {
Fail(fmt.Sprintf("Failed to list pods to verify deletion: %v", err))
framework.Failf("Failed to list pods to verify deletion: %v", err)
}
Expect(len(pods.Items)).To(Equal(0))
})
@@ -238,7 +237,7 @@ var _ = framework.KubeDescribe("Generated release_1_3 clientset", func() {
options = api.ListOptions{LabelSelector: selector}
pods, err = podClient.List(options)
if err != nil {
Fail(fmt.Sprintf("Failed to list pods to verify deletion: %v", err))
framework.Failf("Failed to list pods to verify deletion: %v", err)
}
Expect(len(pods.Items)).To(Equal(0))
})


@@ -31,8 +31,8 @@ import (
// This test requires that --terminated-pod-gc-threshold=100 be set on the controller manager
//
// Slow by design (7 min)
var _ = framework.KubeDescribe("Garbage collector [Feature:GarbageCollector] [Slow]", func() {
f := framework.NewDefaultFramework("garbage-collector")
var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]", func() {
f := framework.NewDefaultFramework("pod-garbage-collector")
It("should handle the creation of 1000 pods", func() {
var count int
for count < 1000 {