From 988aa6fdf67110cdfc012bf79a15d39c337f58bc Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Wed, 24 Jun 2015 21:55:40 -0700 Subject: [PATCH] Move things into a 'kube-system' namespace. --- .../google/heapster-controller.yaml | 2 +- .../google/heapster-service.yaml | 1 + .../heapster-controller-combined.yaml | 2 +- .../influxdb/grafana-service.yaml | 2 +- .../influxdb/heapster-controller.yaml | 2 +- .../influxdb/heapster-service.yaml | 5 ++-- .../influxdb/influxdb-grafana-controller.yaml | 4 +-- .../influxdb/influxdb-service.yaml | 2 +- .../standalone/heapster-controller.yaml | 2 +- .../standalone/heapster-service.yaml | 1 + cluster/addons/dns/skydns-rc.yaml.in | 2 +- cluster/addons/dns/skydns-svc.yaml.in | 2 +- .../fluentd-elasticsearch/es-controller.yaml | 4 +-- .../fluentd-elasticsearch/es-image/Makefile | 5 ++-- .../elasticsearch_logging_discovery.go | 4 +-- .../fluentd-elasticsearch/es-service.yaml | 2 +- .../fluentd-es-image/td-agent.conf | 17 +++++++++++- .../kibana-controller.yaml | 2 +- .../fluentd-elasticsearch/kibana-service.yaml | 2 +- cluster/addons/kube-ui/kube-ui-rc.yaml | 2 +- cluster/addons/kube-ui/kube-ui-svc.yaml | 2 +- .../saltbase/salt/fluentd-es/fluentd-es.yaml | 1 + .../salt/fluentd-gcp/fluentd-gcp.yaml | 1 + cluster/saltbase/salt/kube-addons/init.sls | 7 +++++ .../saltbase/salt/kube-addons/kube-addons.sh | 27 ++++++++++++------- .../saltbase/salt/kube-addons/namespace.yaml | 4 +++ .../limit-range/limit-range.yaml | 1 + pkg/api/types.go | 2 ++ pkg/kubectl/cmd/clusterinfo.go | 6 ++--- test/e2e/dns.go | 14 +++++----- test/e2e/e2e_test.go | 2 +- test/e2e/es_cluster_logging.go | 16 +++++------ test/e2e/monitoring.go | 8 +++--- 33 files changed, 100 insertions(+), 56 deletions(-) create mode 100644 cluster/saltbase/salt/kube-addons/namespace.yaml diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 9de1eac390..f5b67dd485 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: monitoring-heapster-v5 - namespace: default + namespace: kube-system labels: k8s-app: heapster version: v5 diff --git a/cluster/addons/cluster-monitoring/google/heapster-service.yaml b/cluster/addons/cluster-monitoring/google/heapster-service.yaml index 101ec6fe61..3dfbcb5844 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-service.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-service.yaml @@ -2,6 +2,7 @@ kind: Service apiVersion: v1 metadata: name: monitoring-heapster + namespace: kube-system labels: kubernetes.io/cluster-service: "true" kubernetes.io/name: "Heapster" diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index 0beda9a10f..126a9ce3f9 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: monitoring-heapster-v5 - namespace: default + namespace: kube-system labels: k8s-app: heapster version: v5 diff --git a/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml b/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml index 2df94a4075..f1cfb8a234 
100644 --- a/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: monitoring-grafana - namespace: default + namespace: kube-system labels: kubernetes.io/cluster-service: "true" kubernetes.io/name: "Grafana" diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index 030c34dccc..d8d2107ac2 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: monitoring-heapster-v5 - namespace: default + namespace: kube-system labels: k8s-app: heapster version: v5 diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-service.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-service.yaml index 101ec6fe61..88c1139d89 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-service.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-service.yaml @@ -1,8 +1,9 @@ kind: Service apiVersion: v1 -metadata: +metadata: name: monitoring-heapster - labels: + namespace: kube-system + labels: kubernetes.io/cluster-service: "true" kubernetes.io/name: "Heapster" spec: diff --git a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml index ca2bb27cfb..8a2763063a 100644 --- a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: monitoring-influx-grafana-v1 - namespace: default + namespace: kube-system labels: k8s-app: influxGrafana version: v1 @@ -42,7 +42,7 @@ spec: memory: 100Mi env: - name: INFLUXDB_EXTERNAL_URL - value: /api/v1/proxy/namespaces/default/services/monitoring-influxdb:api/db/ + value: /api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api/db/ - name: INFLUXDB_HOST value: monitoring-influxdb - name: INFLUXDB_PORT diff --git a/cluster/addons/cluster-monitoring/influxdb/influxdb-service.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-service.yaml index e608e3161c..066e052476 100644 --- a/cluster/addons/cluster-monitoring/influxdb/influxdb-service.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: monitoring-influxdb - namespace: default + namespace: kube-system labels: kubernetes.io/cluster-service: "true" kubernetes.io/name: "InfluxDB" diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index ee765b1091..045f46ddfd 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: monitoring-heapster-v5 - namespace: default + namespace: kube-system labels: k8s-app: heapster version: v5 diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-service.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-service.yaml index 101ec6fe61..3dfbcb5844 100644 --- 
a/cluster/addons/cluster-monitoring/standalone/heapster-service.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-service.yaml @@ -2,6 +2,7 @@ kind: Service apiVersion: v1 metadata: name: monitoring-heapster + namespace: kube-system labels: kubernetes.io/cluster-service: "true" kubernetes.io/name: "Heapster" diff --git a/cluster/addons/dns/skydns-rc.yaml.in b/cluster/addons/dns/skydns-rc.yaml.in index c6a0d4b05f..174ea5a77b 100644 --- a/cluster/addons/dns/skydns-rc.yaml.in +++ b/cluster/addons/dns/skydns-rc.yaml.in @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: kube-dns-v5 - namespace: default + namespace: kube-system labels: k8s-app: kube-dns version: v5 diff --git a/cluster/addons/dns/skydns-svc.yaml.in b/cluster/addons/dns/skydns-svc.yaml.in index e2ba81b8a1..242c8871ee 100644 --- a/cluster/addons/dns/skydns-svc.yaml.in +++ b/cluster/addons/dns/skydns-svc.yaml.in @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: kube-dns - namespace: default + namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" diff --git a/cluster/addons/fluentd-elasticsearch/es-controller.yaml b/cluster/addons/fluentd-elasticsearch/es-controller.yaml index 20210f3af9..fa66c74691 100644 --- a/cluster/addons/fluentd-elasticsearch/es-controller.yaml +++ b/cluster/addons/fluentd-elasticsearch/es-controller.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: elasticsearch-logging-v1 - namespace: default + namespace: kube-system labels: k8s-app: elasticsearch-logging version: v1 @@ -20,7 +20,7 @@ spec: kubernetes.io/cluster-service: "true" spec: containers: - - image: gcr.io/google_containers/elasticsearch:1.4 + - image: gcr.io/google_containers/elasticsearch:1.5 name: elasticsearch-logging resources: limits: diff --git a/cluster/addons/fluentd-elasticsearch/es-image/Makefile b/cluster/addons/fluentd-elasticsearch/es-image/Makefile index 50bd7c4933..90308ba51d 100755 --- a/cluster/addons/fluentd-elasticsearch/es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/es-image/Makefile @@ -1,7 +1,8 @@ .PHONY: elasticsearch_logging_discovery build push -# Keep this one version ahead to help prevent accidental pushes. -TAG = 1.4 +# The current value of the tag to be used for building and +# pushing an image to gcr.io +TAG = 1.5 build: elasticsearch_logging_discovery docker build -t gcr.io/google_containers/elasticsearch:$(TAG) . diff --git a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go index 89e78c1459..c5ab844882 100644 --- a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go +++ b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go @@ -50,7 +50,7 @@ func main() { // Look for endpoints associated with the Elasticsearch loggging service. // First wait for the service to become available. for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) { - elasticsearch, err = c.Services(api.NamespaceDefault).Get("elasticsearch-logging") + elasticsearch, err = c.Services(api.NamespaceSystem).Get("elasticsearch-logging") if err == nil { break } @@ -67,7 +67,7 @@ func main() { // Wait for some endpoints. 
count := 0 for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) { - endpoints, err = c.Endpoints(api.NamespaceDefault).Get("elasticsearch-logging") + endpoints, err = c.Endpoints(api.NamespaceSystem).Get("elasticsearch-logging") if err != nil { continue } diff --git a/cluster/addons/fluentd-elasticsearch/es-service.yaml b/cluster/addons/fluentd-elasticsearch/es-service.yaml index 572df92b9c..abf1fd3f68 100644 --- a/cluster/addons/fluentd-elasticsearch/es-service.yaml +++ b/cluster/addons/fluentd-elasticsearch/es-service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: elasticsearch-logging - namespace: default + namespace: kube-system labels: k8s-app: elasticsearch-logging kubernetes.io/cluster-service: "true" diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf index cd07b23515..96d87378f9 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf @@ -19,7 +19,7 @@ # The time_format specification below makes sure we properly # parse the time format produced by Docker. This will be # submitted to Elasticsearch and should appear like: -# $ curl 'http://elasticsearch-logging.default:9200/_search?pretty' +# $ curl 'http://elasticsearch-logging:9200/_search?pretty' # ... # { # "_index" : "logstash-2014.09.25", @@ -94,6 +94,21 @@ tag docker + + type elasticsearch + log_level info + include_tag_key true + host elasticsearch-logging + port 9200 + logstash_format true + flush_interval 5s + # Never wait longer than 5 minutes between retries. + max_retry_wait 300 + # Disable the limit on the number of retries (retry forever). + disable_retry_limit +
+ type tail format none diff --git a/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml b/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml index f813fd68e3..893608aef6 100644 --- a/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml +++ b/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: kibana-logging-v1 - namespace: default + namespace: kube-system labels: k8s-app: kibana-logging version: v1 diff --git a/cluster/addons/fluentd-elasticsearch/kibana-service.yaml b/cluster/addons/fluentd-elasticsearch/kibana-service.yaml index 3a1815c5df..43efada2c5 100644 --- a/cluster/addons/fluentd-elasticsearch/kibana-service.yaml +++ b/cluster/addons/fluentd-elasticsearch/kibana-service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: kibana-logging - namespace: default + namespace: kube-system labels: k8s-app: kibana-logging kubernetes.io/cluster-service: "true" diff --git a/cluster/addons/kube-ui/kube-ui-rc.yaml b/cluster/addons/kube-ui/kube-ui-rc.yaml index 1fe2b1dc85..74c439073a 100644 --- a/cluster/addons/kube-ui/kube-ui-rc.yaml +++ b/cluster/addons/kube-ui/kube-ui-rc.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: kube-ui-v1 - namespace: default + namespace: kube-system labels: k8s-app: kube-ui version: v1 diff --git a/cluster/addons/kube-ui/kube-ui-svc.yaml b/cluster/addons/kube-ui/kube-ui-svc.yaml index 5dfcba3a3e..cf960c8bda 100644 --- a/cluster/addons/kube-ui/kube-ui-svc.yaml +++ b/cluster/addons/kube-ui/kube-ui-svc.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: kube-ui - namespace: default + namespace: kube-system labels: k8s-app: kube-ui kubernetes.io/cluster-service: "true" diff --git a/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml b/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml index 1d173d7742..264480a11e 100644 --- a/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml +++ b/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: fluentd-elasticsearch + namespace: kube-system spec: containers: - name: fluentd-elasticsearch diff --git a/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml b/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml index c699d4d8aa..7a658f896b 100644 --- a/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml +++ b/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: fluentd-cloud-logging + namespace: kube-system spec: containers: - name: fluentd-cloud-logging diff --git a/cluster/saltbase/salt/kube-addons/init.sls b/cluster/saltbase/salt/kube-addons/init.sls index 67c0ecda36..c5a7b31831 100644 --- a/cluster/saltbase/salt/kube-addons/init.sls +++ b/cluster/saltbase/salt/kube-addons/init.sls @@ -11,6 +11,13 @@ addon-dir-create: - require: - file: addon-dir-delete +/etc/kubernetes/addons/namespace.yaml: + file.managed: + - source: salt://kube-addons/namespace.yaml + - user: root + - group: root + - file_mode: 644 + {% if pillar.get('enable_cluster_monitoring', '').lower() == 'influxdb' %} /etc/kubernetes/addons/cluster-monitoring/influxdb: file.recurse: diff --git a/cluster/saltbase/salt/kube-addons/kube-addons.sh b/cluster/saltbase/salt/kube-addons/kube-addons.sh index 4cbb1cc1fb..0e4098063f 100644 --- a/cluster/saltbase/salt/kube-addons/kube-addons.sh +++ b/cluster/saltbase/salt/kube-addons/kube-addons.sh @@ -21,6 +21,8 @@ KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl} 
ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600} +SYSTEM_NAMESPACE=kube-system + function create-kubeconfig-secret() { local -r token=$1 local -r username=$2 @@ -47,6 +49,7 @@ contexts: - context: cluster: local user: ${username} + namespace: ${SYSTEM_NAMESPACE} name: service-account-context current-context: service-account-context EOF @@ -67,6 +70,7 @@ contexts: - context: cluster: local user: ${username} + namespace: ${SYSTEM_NAMESPACE} name: service-account-context current-context: service-account-context EOF @@ -82,36 +86,39 @@ metadata: name: token-${safe_username} type: Opaque EOF - create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" & -# TODO: label the secrets with special label so kubectl does not show these? + create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" "${SYSTEM_NAMESPACE}" & } # $1 filename of addon to start. # $2 count of tries to start the addon. # $3 delay in seconds between two consecutive tries +# $4 namespace function start_addon() { local -r addon_filename=$1; local -r tries=$2; local -r delay=$3; + local -r namespace=$4 - create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" + create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" "${namespace}" } # $1 string with json or yaml. # $2 count of tries to start the addon. # $3 delay in seconds between two consecutive tries -# $3 name of this object to use when logging about it. +# $4 name of this object to use when logging about it. +# $5 namespace for this object function create-resource-from-string() { local -r config_string=$1; local tries=$2; local -r delay=$3; local -r config_name=$4; + local -r namespace=$5; while [ ${tries} -gt 0 ]; do - echo "${config_string}" | ${KUBECTL} create -f - && \ - echo "== Successfully started ${config_name} at $(date -Is)" && \ + echo "${config_string}" | ${KUBECTL} --namespace="${namespace}" create -f - && \ + echo "== Successfully started ${config_name} in namespace ${namespace} at $(date -Is)" && \ return 0; let tries=tries-1; - echo "== Failed to start ${config_name} at $(date -Is). ${tries} tries remaining. ==" + echo "== Failed to start ${config_name} in namespace ${namespace} at $(date -Is). ${tries} tries remaining. ==" sleep ${delay}; done return 1; @@ -143,6 +150,8 @@ done echo "== default service account has token ${token_found} ==" +start_addon /etc/kubernetes/addons/namespace.yaml 100 10 "" & + # Generate secrets for "internal service accounts". # TODO(etune): move to a completely yaml/object based # workflow so that service accounts can be created @@ -162,7 +171,7 @@ while read line; do else # Set the server to https://kubernetes. Pods/components that # do not have DNS available will have to override the server. - create-kubeconfig-secret "${token}" "${username}" "https://kubernetes" + create-kubeconfig-secret "${token}" "${username}" "https://kubernetes.default" fi done < /srv/kubernetes/known_tokens.csv @@ -170,7 +179,7 @@ done < /srv/kubernetes/known_tokens.csv # are defined in a namespace other than default, we should still create the limits for the # default namespace. 
for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do - start_addon ${obj} 100 10 & + start_addon ${obj} 100 10 default & echo "++ obj ${obj} is created ++" done diff --git a/cluster/saltbase/salt/kube-addons/namespace.yaml b/cluster/saltbase/salt/kube-addons/namespace.yaml new file mode 100644 index 0000000000..986f4b4822 --- /dev/null +++ b/cluster/saltbase/salt/kube-addons/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system diff --git a/cluster/saltbase/salt/kube-admission-controls/limit-range/limit-range.yaml b/cluster/saltbase/salt/kube-admission-controls/limit-range/limit-range.yaml index f987fc7e30..de2bb13a9a 100644 --- a/cluster/saltbase/salt/kube-admission-controls/limit-range/limit-range.yaml +++ b/cluster/saltbase/salt/kube-admission-controls/limit-range/limit-range.yaml @@ -2,6 +2,7 @@ apiVersion: "v1" kind: "LimitRange" metadata: name: "limits" + namespace: default spec: limits: - type: "Container" diff --git a/pkg/api/types.go b/pkg/api/types.go index 4ef1ae4769..a15d87fdac 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -169,6 +169,8 @@ const ( NamespaceAll string = "" // NamespaceNone is the argument for a context when there is no namespace. NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" // TerminationMessagePathDefault means the default path to capture the application termination message running in a container TerminationMessagePathDefault string = "/dev/termination-log" ) diff --git a/pkg/kubectl/cmd/clusterinfo.go b/pkg/kubectl/cmd/clusterinfo.go index 60d050882f..70c4823371 100644 --- a/pkg/kubectl/cmd/clusterinfo.go +++ b/pkg/kubectl/cmd/clusterinfo.go @@ -57,9 +57,9 @@ func RunClusterInfo(factory *cmdutil.Factory, out io.Writer, cmd *cobra.Command) printService(out, "Kubernetes master", client.Host) mapper, typer := factory.Object() - cmdNamespace, _, err := factory.DefaultNamespace() - if err != nil { - return err + cmdNamespace := cmdutil.GetFlagString(cmd, "namespace") + if cmdNamespace == "" { + cmdNamespace = api.NamespaceSystem } // TODO use generalized labels once they are implemented (#341) diff --git a/test/e2e/dns.go b/test/e2e/dns.go index 4b3cdaf442..1944a2ff3e 100644 --- a/test/e2e/dns.go +++ b/test/e2e/dns.go @@ -188,20 +188,20 @@ var _ = Describe("DNS", func() { // TODO: support DNS on vagrant #3580 SkipIfProviderIs("vagrant") - podClient := f.Client.Pods(api.NamespaceDefault) - + systemClient := f.Client.Pods(api.NamespaceSystem) By("Waiting for DNS Service to be Running") - dnsPods, err := podClient.List(dnsServiceLableSelector, fields.Everything()) + dnsPods, err := systemClient.List(dnsServiceLableSelector, fields.Everything()) if err != nil { Failf("Failed to list all dns service pods") } if len(dnsPods.Items) != 1 { Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLableSelector.String()) } - expectNoError(waitForPodRunning(f.Client, dnsPods.Items[0].Name)) + expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem)) // All the names we need to be able to resolve. // TODO: Spin up a separate test service and test that dns works for that service. + // TODO: Should these be changed to kubernetes.kube-system etc. ? 
namesToResolve := []string{ "kubernetes.default", "kubernetes.default.svc", @@ -227,17 +227,17 @@ var _ = Describe("DNS", func() { // TODO: support DNS on vagrant #3580 SkipIfProviderIs("vagrant") - podClient := f.Client.Pods(api.NamespaceDefault) + systemClient := f.Client.Pods(api.NamespaceSystem) By("Waiting for DNS Service to be Running") - dnsPods, err := podClient.List(dnsServiceLableSelector, fields.Everything()) + dnsPods, err := systemClient.List(dnsServiceLableSelector, fields.Everything()) if err != nil { Failf("Failed to list all dns service pods") } if len(dnsPods.Items) != 1 { Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLableSelector.String()) } - expectNoError(waitForPodRunning(f.Client, dnsPods.Items[0].Name)) + expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem)) // Create a test headless service. By("Creating a test headless service") diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 1ad6978528..54a9c9751c 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -126,7 +126,7 @@ func TestE2E(t *testing.T) { // cluster infrastructure pods that are being pulled or started can block // test pods from running, and tests that ensure all pods are running and // ready will fail). - if err := waitForPodsRunningReady(api.NamespaceDefault, testContext.MinStartupPods, podStartupTimeout); err != nil { + if err := waitForPodsRunningReady(api.NamespaceSystem, testContext.MinStartupPods, podStartupTimeout); err != nil { t.Errorf("Error waiting for all pods to be running and ready: %v", err) return } diff --git a/test/e2e/es_cluster_logging.go b/test/e2e/es_cluster_logging.go index 5a8f1c24a1..a2d1417f94 100644 --- a/test/e2e/es_cluster_logging.go +++ b/test/e2e/es_cluster_logging.go @@ -70,7 +70,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { // Check for the existence of the Elasticsearch service. By("Checking the Elasticsearch service exists.") - s := f.Client.Services(api.NamespaceDefault) + s := f.Client.Services(api.NamespaceSystem) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. var err error @@ -85,10 +85,10 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { // Wait for the Elasticsearch pods to enter the running state. By("Checking to make sure the Elasticsearch pods are running") label := labels.SelectorFromSet(labels.Set(map[string]string{esKey: esValue})) - pods, err := f.Client.Pods(api.NamespaceDefault).List(label, fields.Everything()) + pods, err := f.Client.Pods(api.NamespaceSystem).List(label, fields.Everything()) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { - err = waitForPodRunning(f.Client, pod.Name) + err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) Expect(err).NotTo(HaveOccurred()) } @@ -100,7 +100,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { // Query against the root URL for Elasticsearch. body, err := f.Client.Get(). - Namespace(api.NamespaceDefault). + Namespace(api.NamespaceSystem). Prefix("proxy"). Resource("services"). Name("elasticsearch-logging"). @@ -146,7 +146,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { var body []byte for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { body, err = f.Client.Get(). 
- Namespace(api.NamespaceDefault). + Namespace(api.NamespaceSystem). Prefix("proxy"). Resource("services"). Name("elasticsearch-logging"). @@ -188,7 +188,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { return isNodeReadySetAsExpected(&node, true) }) if len(nodes.Items) < 2 { - Failf("Less than two nodes were found Ready.") + Failf("Less than two nodes were found Ready: %d", len(nodes.Items)) } Logf("Found %d healthy nodes.", len(nodes.Items)) @@ -257,7 +257,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(10 * time.Second) { // Debugging code to report the status of the elasticsearch logging endpoints. - esPods, err := f.Client.Pods(api.NamespaceDefault).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything()) + esPods, err := f.Client.Pods(api.NamespaceSystem).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything()) if err != nil { Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err) continue @@ -272,7 +272,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { // verison of the name. Ask for twice as many log lines as we expect to check for // duplication bugs. body, err = f.Client.Get(). - Namespace(api.NamespaceDefault). + Namespace(api.NamespaceSystem). Prefix("proxy"). Resource("services"). Name("elasticsearch-logging"). diff --git a/test/e2e/monitoring.go b/test/e2e/monitoring.go index d24acec522..98a7af254b 100644 --- a/test/e2e/monitoring.go +++ b/test/e2e/monitoring.go @@ -78,7 +78,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error // situaiton when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller // is running (which would be an error except during a rolling update). for _, rcLabel := range rcLabels { - rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector()) + rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector()) if err != nil { return nil, err } @@ -87,7 +87,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error rcLabel, len(rcList.Items)) } for _, rc := range rcList.Items { - podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything()) + podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything()) if err != nil { return nil, err } @@ -100,7 +100,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error } func expectedServicesExist(c *client.Client) error { - serviceList, err := c.Services(api.NamespaceDefault).List(labels.Everything()) + serviceList, err := c.Services(api.NamespaceSystem).List(labels.Everything()) if err != nil { return err } @@ -205,7 +205,7 @@ func testMonitoringUsingHeapsterInfluxdb(c *client.Client) { if !ok { Failf("failed to get master http client") } - proxyUrl := fmt.Sprintf("%s/api/v1/proxy/namespaces/default/services/%s:api/", getMasterHost(), influxdbService) + proxyUrl := fmt.Sprintf("%s/api/v1/proxy/namespaces/%s/services/%s:api/", getMasterHost(), api.NamespaceSystem, influxdbService) config := &influxdb.ClientConfig{ Host: proxyUrl, // TODO(vishh): Infer username and pw from the Pod spec.
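
Note on using the new namespace constant: the sketch below is illustrative only and is not part of this patch. It assumes the pre-rename import paths (github.com/GoogleCloudPlatform/kubernetes/...) used elsewhere in this tree, and shows how a caller can list the addon pods that now live in kube-system via the api.NamespaceSystem constant added in pkg/api/types.go, mirroring the pattern the e2e test changes above use. The helper name listSystemAddons is hypothetical.

package example

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
)

// listSystemAddons prints every pod in the kube-system namespace, where the
// cluster addons (DNS, monitoring, logging, kube-ui) are placed by this change.
// Illustrative sketch only; not part of the patch.
func listSystemAddons(c *client.Client) error {
	pods, err := c.Pods(api.NamespaceSystem).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for _, pod := range pods.Items {
		fmt.Printf("%s/%s: %v\n", api.NamespaceSystem, pod.Name, pod.Status.Phase)
	}
	return nil
}

From the command line the equivalent check is roughly kubectl get pods --namespace=kube-system, which matches how the kube-addons.sh changes above pass the namespace explicitly to kubectl create.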