From 14c3fc5afe6ddf51ec4a9d041dc1a3be390f29be Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 14 Jul 2015 09:32:07 -0700 Subject: [PATCH 1/3] move user docs to their new home --- docs/{ => user-guide}/accessing-the-cluster.md | 0 docs/{ => user-guide}/annotations.md | 0 .../{ => user-guide}/application-troubleshooting.md | 0 docs/{ => user-guide}/compute-resources.md | 0 docs/{ => user-guide}/container-environment.md | 0 docs/{ => user-guide}/containers.md | 0 docs/{ => user-guide}/debugging-services.md | 0 docs/{ => user-guide}/downward-api.md | 0 .../user-guide}/downward-api/README.md | 0 .../user-guide}/downward-api/dapi-pod.yaml | 0 .../user-guide}/environment-guide/README.md | 0 .../user-guide}/environment-guide/backend-rc.yaml | 0 .../user-guide}/environment-guide/backend-srv.yaml | 0 .../environment-guide/containers/README.md | 0 .../environment-guide/containers/backend/Dockerfile | 0 .../environment-guide/containers/backend/backend.go | 0 .../environment-guide/containers/show/Dockerfile | 0 .../environment-guide/containers/show/show.go | 0 .../user-guide}/environment-guide/diagram.png | Bin .../user-guide}/environment-guide/show-rc.yaml | 0 .../user-guide}/environment-guide/show-srv.yaml | 0 docs/{ => user-guide}/identifiers.md | 0 docs/{ => user-guide}/images.md | 0 docs/{ => user-guide}/influx.png | Bin docs/{ => user-guide}/k8s-ui-explore-filter.png | Bin docs/{ => user-guide}/k8s-ui-explore-groupby.png | Bin docs/{ => user-guide}/k8s-ui-explore-poddetail.png | Bin docs/{ => user-guide}/k8s-ui-explore.png | Bin docs/{ => user-guide}/k8s-ui-nodes.png | Bin docs/{ => user-guide}/k8s-ui-overview.png | Bin docs/{ => user-guide}/kibana.png | Bin docs/{ => user-guide}/kubeconfig-file.md | 0 docs/{ => user-guide}/labels.md | 0 {examples => docs/user-guide}/limitrange/README.md | 0 .../user-guide}/limitrange/invalid-pod.yaml | 0 .../user-guide}/limitrange/limits.yaml | 0 .../user-guide}/limitrange/namespace.yaml | 0 
.../user-guide}/limitrange/valid-pod.yaml | 0 {examples => docs/user-guide}/liveness/README.md | 0 .../user-guide}/liveness/exec-liveness.yaml | 0 .../user-guide}/liveness/http-liveness.yaml | 0 .../user-guide}/liveness/image/Dockerfile | 0 .../user-guide}/liveness/image/Makefile | 0 .../user-guide}/liveness/image/server.go | 0 {examples => docs/user-guide}/logging-demo/Makefile | 0 .../user-guide}/logging-demo/README.md | 0 .../user-guide}/logging-demo/synth-logger.png | Bin .../user-guide}/logging-demo/synthetic_0_25lps.yaml | 0 .../user-guide}/logging-demo/synthetic_10lps.yaml | 0 docs/{ => user-guide}/logging.md | 0 docs/{ => user-guide}/monitoring-architecture.png | Bin docs/{ => user-guide}/monitoring.md | 0 {examples => docs/user-guide}/multi-pod.yaml | 0 docs/{ => user-guide}/namespaces.md | 0 .../user-guide}/node-selection/README.md | 0 .../user-guide}/node-selection/pod.yaml | 0 docs/{ => user-guide}/overview.md | 0 docs/{ => user-guide}/persistent-volumes.md | 0 .../user-guide}/persistent-volumes/README.md | 0 .../persistent-volumes/claims/claim-01.yaml | 0 .../persistent-volumes/claims/claim-02.yaml | 0 .../persistent-volumes/claims/claim-03.json | 0 .../persistent-volumes/simpletest/namespace.json | 0 .../persistent-volumes/simpletest/pod.yaml | 0 .../persistent-volumes/simpletest/service.json | 0 .../user-guide}/persistent-volumes/volumes/gce.yaml | 0 .../persistent-volumes/volumes/local-01.yaml | 0 .../persistent-volumes/volumes/local-02.yaml | 0 .../user-guide}/persistent-volumes/volumes/nfs.yaml | 0 docs/{ => user-guide}/pod-states.md | 0 {examples => docs/user-guide}/pod.yaml | 0 docs/{ => user-guide}/pods.md | 0 docs/{ => user-guide}/replication-controller.md | 0 {examples => docs/user-guide}/replication.yaml | 0 .../user-guide}/resourcequota/README.md | 0 .../user-guide}/resourcequota/limits.yaml | 0 .../user-guide}/resourcequota/namespace.yaml | 0 .../user-guide}/resourcequota/quota.yaml | 0 docs/{ => user-guide}/secrets.md | 0 {examples => 
docs/user-guide}/secrets/README.md | 0 .../user-guide}/secrets/secret-pod.yaml | 0 {examples => docs/user-guide}/secrets/secret.yaml | 0 docs/{ => user-guide}/security-context.md | 0 docs/{ => user-guide}/services-detail.png | Bin docs/{ => user-guide}/services-detail.svg | 0 docs/{ => user-guide}/services-overview.png | Bin docs/{ => user-guide}/services-overview.svg | 0 docs/{ => user-guide}/services.md | 0 docs/{ => user-guide}/sharing-clusters.md | 0 {examples => docs/user-guide}/simple-nginx.md | 0 {examples => docs/user-guide}/simple-yaml.md | 0 docs/{ => user-guide}/ui.md | 0 {examples => docs/user-guide}/update-demo/README.md | 0 .../user-guide}/update-demo/build-images.sh | 0 .../update-demo/images/kitten/Dockerfile | 0 .../update-demo/images/kitten/html/data.json | 0 .../update-demo/images/kitten/html/kitten.jpg | Bin .../update-demo/images/nautilus/Dockerfile | 0 .../update-demo/images/nautilus/html/data.json | 0 .../update-demo/images/nautilus/html/nautilus.jpg | Bin .../user-guide}/update-demo/kitten-rc.yaml | 0 .../user-guide}/update-demo/local/LICENSE.angular | 0 .../user-guide}/update-demo/local/angular.min.js | 0 .../update-demo/local/angular.min.js.map | 0 .../user-guide}/update-demo/local/index.html | 0 .../user-guide}/update-demo/local/script.js | 0 .../user-guide}/update-demo/local/style.css | 0 .../user-guide}/update-demo/nautilus-rc.yaml | 0 docs/{ => user-guide}/user-guide.md | 0 docs/{ => user-guide}/volumes.md | 0 {examples => docs/user-guide}/walkthrough/README.md | 0 {examples => docs/user-guide}/walkthrough/k8s201.md | 0 .../walkthrough/pod-with-http-healthcheck.yaml | 0 {examples => docs/user-guide}/walkthrough/pod1.yaml | 0 {examples => docs/user-guide}/walkthrough/pod2.yaml | 0 .../user-guide}/walkthrough/podtemplate.json | 0 .../walkthrough/replication-controller.yaml | 0 .../user-guide}/walkthrough/service.yaml | 0 docs/{ => user-guide}/working-with-resources.md | 0 119 files changed, 0 insertions(+), 0 deletions(-) rename docs/{ 
=> user-guide}/accessing-the-cluster.md (100%) rename docs/{ => user-guide}/annotations.md (100%) rename docs/{ => user-guide}/application-troubleshooting.md (100%) rename docs/{ => user-guide}/compute-resources.md (100%) rename docs/{ => user-guide}/container-environment.md (100%) rename docs/{ => user-guide}/containers.md (100%) rename docs/{ => user-guide}/debugging-services.md (100%) rename docs/{ => user-guide}/downward-api.md (100%) rename {examples => docs/user-guide}/downward-api/README.md (100%) rename {examples => docs/user-guide}/downward-api/dapi-pod.yaml (100%) rename {examples => docs/user-guide}/environment-guide/README.md (100%) rename {examples => docs/user-guide}/environment-guide/backend-rc.yaml (100%) rename {examples => docs/user-guide}/environment-guide/backend-srv.yaml (100%) rename {examples => docs/user-guide}/environment-guide/containers/README.md (100%) rename {examples => docs/user-guide}/environment-guide/containers/backend/Dockerfile (100%) rename {examples => docs/user-guide}/environment-guide/containers/backend/backend.go (100%) rename {examples => docs/user-guide}/environment-guide/containers/show/Dockerfile (100%) rename {examples => docs/user-guide}/environment-guide/containers/show/show.go (100%) rename {examples => docs/user-guide}/environment-guide/diagram.png (100%) rename {examples => docs/user-guide}/environment-guide/show-rc.yaml (100%) rename {examples => docs/user-guide}/environment-guide/show-srv.yaml (100%) rename docs/{ => user-guide}/identifiers.md (100%) rename docs/{ => user-guide}/images.md (100%) rename docs/{ => user-guide}/influx.png (100%) rename docs/{ => user-guide}/k8s-ui-explore-filter.png (100%) rename docs/{ => user-guide}/k8s-ui-explore-groupby.png (100%) rename docs/{ => user-guide}/k8s-ui-explore-poddetail.png (100%) rename docs/{ => user-guide}/k8s-ui-explore.png (100%) rename docs/{ => user-guide}/k8s-ui-nodes.png (100%) rename docs/{ => user-guide}/k8s-ui-overview.png (100%) rename docs/{ => 
user-guide}/kibana.png (100%) rename docs/{ => user-guide}/kubeconfig-file.md (100%) rename docs/{ => user-guide}/labels.md (100%) rename {examples => docs/user-guide}/limitrange/README.md (100%) rename {examples => docs/user-guide}/limitrange/invalid-pod.yaml (100%) rename {examples => docs/user-guide}/limitrange/limits.yaml (100%) rename {examples => docs/user-guide}/limitrange/namespace.yaml (100%) rename {examples => docs/user-guide}/limitrange/valid-pod.yaml (100%) rename {examples => docs/user-guide}/liveness/README.md (100%) rename {examples => docs/user-guide}/liveness/exec-liveness.yaml (100%) rename {examples => docs/user-guide}/liveness/http-liveness.yaml (100%) rename {examples => docs/user-guide}/liveness/image/Dockerfile (100%) rename {examples => docs/user-guide}/liveness/image/Makefile (100%) rename {examples => docs/user-guide}/liveness/image/server.go (100%) rename {examples => docs/user-guide}/logging-demo/Makefile (100%) rename {examples => docs/user-guide}/logging-demo/README.md (100%) rename {examples => docs/user-guide}/logging-demo/synth-logger.png (100%) rename {examples => docs/user-guide}/logging-demo/synthetic_0_25lps.yaml (100%) rename {examples => docs/user-guide}/logging-demo/synthetic_10lps.yaml (100%) rename docs/{ => user-guide}/logging.md (100%) rename docs/{ => user-guide}/monitoring-architecture.png (100%) rename docs/{ => user-guide}/monitoring.md (100%) rename {examples => docs/user-guide}/multi-pod.yaml (100%) rename docs/{ => user-guide}/namespaces.md (100%) rename {examples => docs/user-guide}/node-selection/README.md (100%) rename {examples => docs/user-guide}/node-selection/pod.yaml (100%) rename docs/{ => user-guide}/overview.md (100%) rename docs/{ => user-guide}/persistent-volumes.md (100%) rename {examples => docs/user-guide}/persistent-volumes/README.md (100%) rename {examples => docs/user-guide}/persistent-volumes/claims/claim-01.yaml (100%) rename {examples => 
docs/user-guide}/persistent-volumes/claims/claim-02.yaml (100%) rename {examples => docs/user-guide}/persistent-volumes/claims/claim-03.json (100%) rename {examples => docs/user-guide}/persistent-volumes/simpletest/namespace.json (100%) rename {examples => docs/user-guide}/persistent-volumes/simpletest/pod.yaml (100%) rename {examples => docs/user-guide}/persistent-volumes/simpletest/service.json (100%) rename {examples => docs/user-guide}/persistent-volumes/volumes/gce.yaml (100%) rename {examples => docs/user-guide}/persistent-volumes/volumes/local-01.yaml (100%) rename {examples => docs/user-guide}/persistent-volumes/volumes/local-02.yaml (100%) rename {examples => docs/user-guide}/persistent-volumes/volumes/nfs.yaml (100%) rename docs/{ => user-guide}/pod-states.md (100%) rename {examples => docs/user-guide}/pod.yaml (100%) rename docs/{ => user-guide}/pods.md (100%) rename docs/{ => user-guide}/replication-controller.md (100%) rename {examples => docs/user-guide}/replication.yaml (100%) rename {examples => docs/user-guide}/resourcequota/README.md (100%) rename {examples => docs/user-guide}/resourcequota/limits.yaml (100%) rename {examples => docs/user-guide}/resourcequota/namespace.yaml (100%) rename {examples => docs/user-guide}/resourcequota/quota.yaml (100%) rename docs/{ => user-guide}/secrets.md (100%) rename {examples => docs/user-guide}/secrets/README.md (100%) rename {examples => docs/user-guide}/secrets/secret-pod.yaml (100%) rename {examples => docs/user-guide}/secrets/secret.yaml (100%) rename docs/{ => user-guide}/security-context.md (100%) rename docs/{ => user-guide}/services-detail.png (100%) rename docs/{ => user-guide}/services-detail.svg (100%) rename docs/{ => user-guide}/services-overview.png (100%) rename docs/{ => user-guide}/services-overview.svg (100%) rename docs/{ => user-guide}/services.md (100%) rename docs/{ => user-guide}/sharing-clusters.md (100%) rename {examples => docs/user-guide}/simple-nginx.md (100%) rename {examples => 
docs/user-guide}/simple-yaml.md (100%) rename docs/{ => user-guide}/ui.md (100%) rename {examples => docs/user-guide}/update-demo/README.md (100%) rename {examples => docs/user-guide}/update-demo/build-images.sh (100%) rename {examples => docs/user-guide}/update-demo/images/kitten/Dockerfile (100%) rename {examples => docs/user-guide}/update-demo/images/kitten/html/data.json (100%) rename {examples => docs/user-guide}/update-demo/images/kitten/html/kitten.jpg (100%) rename {examples => docs/user-guide}/update-demo/images/nautilus/Dockerfile (100%) rename {examples => docs/user-guide}/update-demo/images/nautilus/html/data.json (100%) rename {examples => docs/user-guide}/update-demo/images/nautilus/html/nautilus.jpg (100%) rename {examples => docs/user-guide}/update-demo/kitten-rc.yaml (100%) rename {examples => docs/user-guide}/update-demo/local/LICENSE.angular (100%) rename {examples => docs/user-guide}/update-demo/local/angular.min.js (100%) rename {examples => docs/user-guide}/update-demo/local/angular.min.js.map (100%) rename {examples => docs/user-guide}/update-demo/local/index.html (100%) rename {examples => docs/user-guide}/update-demo/local/script.js (100%) rename {examples => docs/user-guide}/update-demo/local/style.css (100%) rename {examples => docs/user-guide}/update-demo/nautilus-rc.yaml (100%) rename docs/{ => user-guide}/user-guide.md (100%) rename docs/{ => user-guide}/volumes.md (100%) rename {examples => docs/user-guide}/walkthrough/README.md (100%) rename {examples => docs/user-guide}/walkthrough/k8s201.md (100%) rename {examples => docs/user-guide}/walkthrough/pod-with-http-healthcheck.yaml (100%) rename {examples => docs/user-guide}/walkthrough/pod1.yaml (100%) rename {examples => docs/user-guide}/walkthrough/pod2.yaml (100%) rename {examples => docs/user-guide}/walkthrough/podtemplate.json (100%) rename {examples => docs/user-guide}/walkthrough/replication-controller.yaml (100%) rename {examples => docs/user-guide}/walkthrough/service.yaml 
(100%) rename docs/{ => user-guide}/working-with-resources.md (100%) diff --git a/docs/accessing-the-cluster.md b/docs/user-guide/accessing-the-cluster.md similarity index 100% rename from docs/accessing-the-cluster.md rename to docs/user-guide/accessing-the-cluster.md diff --git a/docs/annotations.md b/docs/user-guide/annotations.md similarity index 100% rename from docs/annotations.md rename to docs/user-guide/annotations.md diff --git a/docs/application-troubleshooting.md b/docs/user-guide/application-troubleshooting.md similarity index 100% rename from docs/application-troubleshooting.md rename to docs/user-guide/application-troubleshooting.md diff --git a/docs/compute-resources.md b/docs/user-guide/compute-resources.md similarity index 100% rename from docs/compute-resources.md rename to docs/user-guide/compute-resources.md diff --git a/docs/container-environment.md b/docs/user-guide/container-environment.md similarity index 100% rename from docs/container-environment.md rename to docs/user-guide/container-environment.md diff --git a/docs/containers.md b/docs/user-guide/containers.md similarity index 100% rename from docs/containers.md rename to docs/user-guide/containers.md diff --git a/docs/debugging-services.md b/docs/user-guide/debugging-services.md similarity index 100% rename from docs/debugging-services.md rename to docs/user-guide/debugging-services.md diff --git a/docs/downward-api.md b/docs/user-guide/downward-api.md similarity index 100% rename from docs/downward-api.md rename to docs/user-guide/downward-api.md diff --git a/examples/downward-api/README.md b/docs/user-guide/downward-api/README.md similarity index 100% rename from examples/downward-api/README.md rename to docs/user-guide/downward-api/README.md diff --git a/examples/downward-api/dapi-pod.yaml b/docs/user-guide/downward-api/dapi-pod.yaml similarity index 100% rename from examples/downward-api/dapi-pod.yaml rename to docs/user-guide/downward-api/dapi-pod.yaml diff --git 
a/examples/environment-guide/README.md b/docs/user-guide/environment-guide/README.md similarity index 100% rename from examples/environment-guide/README.md rename to docs/user-guide/environment-guide/README.md diff --git a/examples/environment-guide/backend-rc.yaml b/docs/user-guide/environment-guide/backend-rc.yaml similarity index 100% rename from examples/environment-guide/backend-rc.yaml rename to docs/user-guide/environment-guide/backend-rc.yaml diff --git a/examples/environment-guide/backend-srv.yaml b/docs/user-guide/environment-guide/backend-srv.yaml similarity index 100% rename from examples/environment-guide/backend-srv.yaml rename to docs/user-guide/environment-guide/backend-srv.yaml diff --git a/examples/environment-guide/containers/README.md b/docs/user-guide/environment-guide/containers/README.md similarity index 100% rename from examples/environment-guide/containers/README.md rename to docs/user-guide/environment-guide/containers/README.md diff --git a/examples/environment-guide/containers/backend/Dockerfile b/docs/user-guide/environment-guide/containers/backend/Dockerfile similarity index 100% rename from examples/environment-guide/containers/backend/Dockerfile rename to docs/user-guide/environment-guide/containers/backend/Dockerfile diff --git a/examples/environment-guide/containers/backend/backend.go b/docs/user-guide/environment-guide/containers/backend/backend.go similarity index 100% rename from examples/environment-guide/containers/backend/backend.go rename to docs/user-guide/environment-guide/containers/backend/backend.go diff --git a/examples/environment-guide/containers/show/Dockerfile b/docs/user-guide/environment-guide/containers/show/Dockerfile similarity index 100% rename from examples/environment-guide/containers/show/Dockerfile rename to docs/user-guide/environment-guide/containers/show/Dockerfile diff --git a/examples/environment-guide/containers/show/show.go b/docs/user-guide/environment-guide/containers/show/show.go similarity 
index 100% rename from examples/environment-guide/containers/show/show.go rename to docs/user-guide/environment-guide/containers/show/show.go diff --git a/examples/environment-guide/diagram.png b/docs/user-guide/environment-guide/diagram.png similarity index 100% rename from examples/environment-guide/diagram.png rename to docs/user-guide/environment-guide/diagram.png diff --git a/examples/environment-guide/show-rc.yaml b/docs/user-guide/environment-guide/show-rc.yaml similarity index 100% rename from examples/environment-guide/show-rc.yaml rename to docs/user-guide/environment-guide/show-rc.yaml diff --git a/examples/environment-guide/show-srv.yaml b/docs/user-guide/environment-guide/show-srv.yaml similarity index 100% rename from examples/environment-guide/show-srv.yaml rename to docs/user-guide/environment-guide/show-srv.yaml diff --git a/docs/identifiers.md b/docs/user-guide/identifiers.md similarity index 100% rename from docs/identifiers.md rename to docs/user-guide/identifiers.md diff --git a/docs/images.md b/docs/user-guide/images.md similarity index 100% rename from docs/images.md rename to docs/user-guide/images.md diff --git a/docs/influx.png b/docs/user-guide/influx.png similarity index 100% rename from docs/influx.png rename to docs/user-guide/influx.png diff --git a/docs/k8s-ui-explore-filter.png b/docs/user-guide/k8s-ui-explore-filter.png similarity index 100% rename from docs/k8s-ui-explore-filter.png rename to docs/user-guide/k8s-ui-explore-filter.png diff --git a/docs/k8s-ui-explore-groupby.png b/docs/user-guide/k8s-ui-explore-groupby.png similarity index 100% rename from docs/k8s-ui-explore-groupby.png rename to docs/user-guide/k8s-ui-explore-groupby.png diff --git a/docs/k8s-ui-explore-poddetail.png b/docs/user-guide/k8s-ui-explore-poddetail.png similarity index 100% rename from docs/k8s-ui-explore-poddetail.png rename to docs/user-guide/k8s-ui-explore-poddetail.png diff --git a/docs/k8s-ui-explore.png b/docs/user-guide/k8s-ui-explore.png 
similarity index 100% rename from docs/k8s-ui-explore.png rename to docs/user-guide/k8s-ui-explore.png diff --git a/docs/k8s-ui-nodes.png b/docs/user-guide/k8s-ui-nodes.png similarity index 100% rename from docs/k8s-ui-nodes.png rename to docs/user-guide/k8s-ui-nodes.png diff --git a/docs/k8s-ui-overview.png b/docs/user-guide/k8s-ui-overview.png similarity index 100% rename from docs/k8s-ui-overview.png rename to docs/user-guide/k8s-ui-overview.png diff --git a/docs/kibana.png b/docs/user-guide/kibana.png similarity index 100% rename from docs/kibana.png rename to docs/user-guide/kibana.png diff --git a/docs/kubeconfig-file.md b/docs/user-guide/kubeconfig-file.md similarity index 100% rename from docs/kubeconfig-file.md rename to docs/user-guide/kubeconfig-file.md diff --git a/docs/labels.md b/docs/user-guide/labels.md similarity index 100% rename from docs/labels.md rename to docs/user-guide/labels.md diff --git a/examples/limitrange/README.md b/docs/user-guide/limitrange/README.md similarity index 100% rename from examples/limitrange/README.md rename to docs/user-guide/limitrange/README.md diff --git a/examples/limitrange/invalid-pod.yaml b/docs/user-guide/limitrange/invalid-pod.yaml similarity index 100% rename from examples/limitrange/invalid-pod.yaml rename to docs/user-guide/limitrange/invalid-pod.yaml diff --git a/examples/limitrange/limits.yaml b/docs/user-guide/limitrange/limits.yaml similarity index 100% rename from examples/limitrange/limits.yaml rename to docs/user-guide/limitrange/limits.yaml diff --git a/examples/limitrange/namespace.yaml b/docs/user-guide/limitrange/namespace.yaml similarity index 100% rename from examples/limitrange/namespace.yaml rename to docs/user-guide/limitrange/namespace.yaml diff --git a/examples/limitrange/valid-pod.yaml b/docs/user-guide/limitrange/valid-pod.yaml similarity index 100% rename from examples/limitrange/valid-pod.yaml rename to docs/user-guide/limitrange/valid-pod.yaml diff --git a/examples/liveness/README.md 
b/docs/user-guide/liveness/README.md similarity index 100% rename from examples/liveness/README.md rename to docs/user-guide/liveness/README.md diff --git a/examples/liveness/exec-liveness.yaml b/docs/user-guide/liveness/exec-liveness.yaml similarity index 100% rename from examples/liveness/exec-liveness.yaml rename to docs/user-guide/liveness/exec-liveness.yaml diff --git a/examples/liveness/http-liveness.yaml b/docs/user-guide/liveness/http-liveness.yaml similarity index 100% rename from examples/liveness/http-liveness.yaml rename to docs/user-guide/liveness/http-liveness.yaml diff --git a/examples/liveness/image/Dockerfile b/docs/user-guide/liveness/image/Dockerfile similarity index 100% rename from examples/liveness/image/Dockerfile rename to docs/user-guide/liveness/image/Dockerfile diff --git a/examples/liveness/image/Makefile b/docs/user-guide/liveness/image/Makefile similarity index 100% rename from examples/liveness/image/Makefile rename to docs/user-guide/liveness/image/Makefile diff --git a/examples/liveness/image/server.go b/docs/user-guide/liveness/image/server.go similarity index 100% rename from examples/liveness/image/server.go rename to docs/user-guide/liveness/image/server.go diff --git a/examples/logging-demo/Makefile b/docs/user-guide/logging-demo/Makefile similarity index 100% rename from examples/logging-demo/Makefile rename to docs/user-guide/logging-demo/Makefile diff --git a/examples/logging-demo/README.md b/docs/user-guide/logging-demo/README.md similarity index 100% rename from examples/logging-demo/README.md rename to docs/user-guide/logging-demo/README.md diff --git a/examples/logging-demo/synth-logger.png b/docs/user-guide/logging-demo/synth-logger.png similarity index 100% rename from examples/logging-demo/synth-logger.png rename to docs/user-guide/logging-demo/synth-logger.png diff --git a/examples/logging-demo/synthetic_0_25lps.yaml b/docs/user-guide/logging-demo/synthetic_0_25lps.yaml similarity index 100% rename from 
examples/logging-demo/synthetic_0_25lps.yaml rename to docs/user-guide/logging-demo/synthetic_0_25lps.yaml diff --git a/examples/logging-demo/synthetic_10lps.yaml b/docs/user-guide/logging-demo/synthetic_10lps.yaml similarity index 100% rename from examples/logging-demo/synthetic_10lps.yaml rename to docs/user-guide/logging-demo/synthetic_10lps.yaml diff --git a/docs/logging.md b/docs/user-guide/logging.md similarity index 100% rename from docs/logging.md rename to docs/user-guide/logging.md diff --git a/docs/monitoring-architecture.png b/docs/user-guide/monitoring-architecture.png similarity index 100% rename from docs/monitoring-architecture.png rename to docs/user-guide/monitoring-architecture.png diff --git a/docs/monitoring.md b/docs/user-guide/monitoring.md similarity index 100% rename from docs/monitoring.md rename to docs/user-guide/monitoring.md diff --git a/examples/multi-pod.yaml b/docs/user-guide/multi-pod.yaml similarity index 100% rename from examples/multi-pod.yaml rename to docs/user-guide/multi-pod.yaml diff --git a/docs/namespaces.md b/docs/user-guide/namespaces.md similarity index 100% rename from docs/namespaces.md rename to docs/user-guide/namespaces.md diff --git a/examples/node-selection/README.md b/docs/user-guide/node-selection/README.md similarity index 100% rename from examples/node-selection/README.md rename to docs/user-guide/node-selection/README.md diff --git a/examples/node-selection/pod.yaml b/docs/user-guide/node-selection/pod.yaml similarity index 100% rename from examples/node-selection/pod.yaml rename to docs/user-guide/node-selection/pod.yaml diff --git a/docs/overview.md b/docs/user-guide/overview.md similarity index 100% rename from docs/overview.md rename to docs/user-guide/overview.md diff --git a/docs/persistent-volumes.md b/docs/user-guide/persistent-volumes.md similarity index 100% rename from docs/persistent-volumes.md rename to docs/user-guide/persistent-volumes.md diff --git a/examples/persistent-volumes/README.md 
b/docs/user-guide/persistent-volumes/README.md similarity index 100% rename from examples/persistent-volumes/README.md rename to docs/user-guide/persistent-volumes/README.md diff --git a/examples/persistent-volumes/claims/claim-01.yaml b/docs/user-guide/persistent-volumes/claims/claim-01.yaml similarity index 100% rename from examples/persistent-volumes/claims/claim-01.yaml rename to docs/user-guide/persistent-volumes/claims/claim-01.yaml diff --git a/examples/persistent-volumes/claims/claim-02.yaml b/docs/user-guide/persistent-volumes/claims/claim-02.yaml similarity index 100% rename from examples/persistent-volumes/claims/claim-02.yaml rename to docs/user-guide/persistent-volumes/claims/claim-02.yaml diff --git a/examples/persistent-volumes/claims/claim-03.json b/docs/user-guide/persistent-volumes/claims/claim-03.json similarity index 100% rename from examples/persistent-volumes/claims/claim-03.json rename to docs/user-guide/persistent-volumes/claims/claim-03.json diff --git a/examples/persistent-volumes/simpletest/namespace.json b/docs/user-guide/persistent-volumes/simpletest/namespace.json similarity index 100% rename from examples/persistent-volumes/simpletest/namespace.json rename to docs/user-guide/persistent-volumes/simpletest/namespace.json diff --git a/examples/persistent-volumes/simpletest/pod.yaml b/docs/user-guide/persistent-volumes/simpletest/pod.yaml similarity index 100% rename from examples/persistent-volumes/simpletest/pod.yaml rename to docs/user-guide/persistent-volumes/simpletest/pod.yaml diff --git a/examples/persistent-volumes/simpletest/service.json b/docs/user-guide/persistent-volumes/simpletest/service.json similarity index 100% rename from examples/persistent-volumes/simpletest/service.json rename to docs/user-guide/persistent-volumes/simpletest/service.json diff --git a/examples/persistent-volumes/volumes/gce.yaml b/docs/user-guide/persistent-volumes/volumes/gce.yaml similarity index 100% rename from 
examples/persistent-volumes/volumes/gce.yaml rename to docs/user-guide/persistent-volumes/volumes/gce.yaml diff --git a/examples/persistent-volumes/volumes/local-01.yaml b/docs/user-guide/persistent-volumes/volumes/local-01.yaml similarity index 100% rename from examples/persistent-volumes/volumes/local-01.yaml rename to docs/user-guide/persistent-volumes/volumes/local-01.yaml diff --git a/examples/persistent-volumes/volumes/local-02.yaml b/docs/user-guide/persistent-volumes/volumes/local-02.yaml similarity index 100% rename from examples/persistent-volumes/volumes/local-02.yaml rename to docs/user-guide/persistent-volumes/volumes/local-02.yaml diff --git a/examples/persistent-volumes/volumes/nfs.yaml b/docs/user-guide/persistent-volumes/volumes/nfs.yaml similarity index 100% rename from examples/persistent-volumes/volumes/nfs.yaml rename to docs/user-guide/persistent-volumes/volumes/nfs.yaml diff --git a/docs/pod-states.md b/docs/user-guide/pod-states.md similarity index 100% rename from docs/pod-states.md rename to docs/user-guide/pod-states.md diff --git a/examples/pod.yaml b/docs/user-guide/pod.yaml similarity index 100% rename from examples/pod.yaml rename to docs/user-guide/pod.yaml diff --git a/docs/pods.md b/docs/user-guide/pods.md similarity index 100% rename from docs/pods.md rename to docs/user-guide/pods.md diff --git a/docs/replication-controller.md b/docs/user-guide/replication-controller.md similarity index 100% rename from docs/replication-controller.md rename to docs/user-guide/replication-controller.md diff --git a/examples/replication.yaml b/docs/user-guide/replication.yaml similarity index 100% rename from examples/replication.yaml rename to docs/user-guide/replication.yaml diff --git a/examples/resourcequota/README.md b/docs/user-guide/resourcequota/README.md similarity index 100% rename from examples/resourcequota/README.md rename to docs/user-guide/resourcequota/README.md diff --git a/examples/resourcequota/limits.yaml 
b/docs/user-guide/resourcequota/limits.yaml similarity index 100% rename from examples/resourcequota/limits.yaml rename to docs/user-guide/resourcequota/limits.yaml diff --git a/examples/resourcequota/namespace.yaml b/docs/user-guide/resourcequota/namespace.yaml similarity index 100% rename from examples/resourcequota/namespace.yaml rename to docs/user-guide/resourcequota/namespace.yaml diff --git a/examples/resourcequota/quota.yaml b/docs/user-guide/resourcequota/quota.yaml similarity index 100% rename from examples/resourcequota/quota.yaml rename to docs/user-guide/resourcequota/quota.yaml diff --git a/docs/secrets.md b/docs/user-guide/secrets.md similarity index 100% rename from docs/secrets.md rename to docs/user-guide/secrets.md diff --git a/examples/secrets/README.md b/docs/user-guide/secrets/README.md similarity index 100% rename from examples/secrets/README.md rename to docs/user-guide/secrets/README.md diff --git a/examples/secrets/secret-pod.yaml b/docs/user-guide/secrets/secret-pod.yaml similarity index 100% rename from examples/secrets/secret-pod.yaml rename to docs/user-guide/secrets/secret-pod.yaml diff --git a/examples/secrets/secret.yaml b/docs/user-guide/secrets/secret.yaml similarity index 100% rename from examples/secrets/secret.yaml rename to docs/user-guide/secrets/secret.yaml diff --git a/docs/security-context.md b/docs/user-guide/security-context.md similarity index 100% rename from docs/security-context.md rename to docs/user-guide/security-context.md diff --git a/docs/services-detail.png b/docs/user-guide/services-detail.png similarity index 100% rename from docs/services-detail.png rename to docs/user-guide/services-detail.png diff --git a/docs/services-detail.svg b/docs/user-guide/services-detail.svg similarity index 100% rename from docs/services-detail.svg rename to docs/user-guide/services-detail.svg diff --git a/docs/services-overview.png b/docs/user-guide/services-overview.png similarity index 100% rename from 
docs/services-overview.png rename to docs/user-guide/services-overview.png diff --git a/docs/services-overview.svg b/docs/user-guide/services-overview.svg similarity index 100% rename from docs/services-overview.svg rename to docs/user-guide/services-overview.svg diff --git a/docs/services.md b/docs/user-guide/services.md similarity index 100% rename from docs/services.md rename to docs/user-guide/services.md diff --git a/docs/sharing-clusters.md b/docs/user-guide/sharing-clusters.md similarity index 100% rename from docs/sharing-clusters.md rename to docs/user-guide/sharing-clusters.md diff --git a/examples/simple-nginx.md b/docs/user-guide/simple-nginx.md similarity index 100% rename from examples/simple-nginx.md rename to docs/user-guide/simple-nginx.md diff --git a/examples/simple-yaml.md b/docs/user-guide/simple-yaml.md similarity index 100% rename from examples/simple-yaml.md rename to docs/user-guide/simple-yaml.md diff --git a/docs/ui.md b/docs/user-guide/ui.md similarity index 100% rename from docs/ui.md rename to docs/user-guide/ui.md diff --git a/examples/update-demo/README.md b/docs/user-guide/update-demo/README.md similarity index 100% rename from examples/update-demo/README.md rename to docs/user-guide/update-demo/README.md diff --git a/examples/update-demo/build-images.sh b/docs/user-guide/update-demo/build-images.sh similarity index 100% rename from examples/update-demo/build-images.sh rename to docs/user-guide/update-demo/build-images.sh diff --git a/examples/update-demo/images/kitten/Dockerfile b/docs/user-guide/update-demo/images/kitten/Dockerfile similarity index 100% rename from examples/update-demo/images/kitten/Dockerfile rename to docs/user-guide/update-demo/images/kitten/Dockerfile diff --git a/examples/update-demo/images/kitten/html/data.json b/docs/user-guide/update-demo/images/kitten/html/data.json similarity index 100% rename from examples/update-demo/images/kitten/html/data.json rename to 
docs/user-guide/update-demo/images/kitten/html/data.json diff --git a/examples/update-demo/images/kitten/html/kitten.jpg b/docs/user-guide/update-demo/images/kitten/html/kitten.jpg similarity index 100% rename from examples/update-demo/images/kitten/html/kitten.jpg rename to docs/user-guide/update-demo/images/kitten/html/kitten.jpg diff --git a/examples/update-demo/images/nautilus/Dockerfile b/docs/user-guide/update-demo/images/nautilus/Dockerfile similarity index 100% rename from examples/update-demo/images/nautilus/Dockerfile rename to docs/user-guide/update-demo/images/nautilus/Dockerfile diff --git a/examples/update-demo/images/nautilus/html/data.json b/docs/user-guide/update-demo/images/nautilus/html/data.json similarity index 100% rename from examples/update-demo/images/nautilus/html/data.json rename to docs/user-guide/update-demo/images/nautilus/html/data.json diff --git a/examples/update-demo/images/nautilus/html/nautilus.jpg b/docs/user-guide/update-demo/images/nautilus/html/nautilus.jpg similarity index 100% rename from examples/update-demo/images/nautilus/html/nautilus.jpg rename to docs/user-guide/update-demo/images/nautilus/html/nautilus.jpg diff --git a/examples/update-demo/kitten-rc.yaml b/docs/user-guide/update-demo/kitten-rc.yaml similarity index 100% rename from examples/update-demo/kitten-rc.yaml rename to docs/user-guide/update-demo/kitten-rc.yaml diff --git a/examples/update-demo/local/LICENSE.angular b/docs/user-guide/update-demo/local/LICENSE.angular similarity index 100% rename from examples/update-demo/local/LICENSE.angular rename to docs/user-guide/update-demo/local/LICENSE.angular diff --git a/examples/update-demo/local/angular.min.js b/docs/user-guide/update-demo/local/angular.min.js similarity index 100% rename from examples/update-demo/local/angular.min.js rename to docs/user-guide/update-demo/local/angular.min.js diff --git a/examples/update-demo/local/angular.min.js.map b/docs/user-guide/update-demo/local/angular.min.js.map 
similarity index 100% rename from examples/update-demo/local/angular.min.js.map rename to docs/user-guide/update-demo/local/angular.min.js.map diff --git a/examples/update-demo/local/index.html b/docs/user-guide/update-demo/local/index.html similarity index 100% rename from examples/update-demo/local/index.html rename to docs/user-guide/update-demo/local/index.html diff --git a/examples/update-demo/local/script.js b/docs/user-guide/update-demo/local/script.js similarity index 100% rename from examples/update-demo/local/script.js rename to docs/user-guide/update-demo/local/script.js diff --git a/examples/update-demo/local/style.css b/docs/user-guide/update-demo/local/style.css similarity index 100% rename from examples/update-demo/local/style.css rename to docs/user-guide/update-demo/local/style.css diff --git a/examples/update-demo/nautilus-rc.yaml b/docs/user-guide/update-demo/nautilus-rc.yaml similarity index 100% rename from examples/update-demo/nautilus-rc.yaml rename to docs/user-guide/update-demo/nautilus-rc.yaml diff --git a/docs/user-guide.md b/docs/user-guide/user-guide.md similarity index 100% rename from docs/user-guide.md rename to docs/user-guide/user-guide.md diff --git a/docs/volumes.md b/docs/user-guide/volumes.md similarity index 100% rename from docs/volumes.md rename to docs/user-guide/volumes.md diff --git a/examples/walkthrough/README.md b/docs/user-guide/walkthrough/README.md similarity index 100% rename from examples/walkthrough/README.md rename to docs/user-guide/walkthrough/README.md diff --git a/examples/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md similarity index 100% rename from examples/walkthrough/k8s201.md rename to docs/user-guide/walkthrough/k8s201.md diff --git a/examples/walkthrough/pod-with-http-healthcheck.yaml b/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml similarity index 100% rename from examples/walkthrough/pod-with-http-healthcheck.yaml rename to 
docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml diff --git a/examples/walkthrough/pod1.yaml b/docs/user-guide/walkthrough/pod1.yaml similarity index 100% rename from examples/walkthrough/pod1.yaml rename to docs/user-guide/walkthrough/pod1.yaml diff --git a/examples/walkthrough/pod2.yaml b/docs/user-guide/walkthrough/pod2.yaml similarity index 100% rename from examples/walkthrough/pod2.yaml rename to docs/user-guide/walkthrough/pod2.yaml diff --git a/examples/walkthrough/podtemplate.json b/docs/user-guide/walkthrough/podtemplate.json similarity index 100% rename from examples/walkthrough/podtemplate.json rename to docs/user-guide/walkthrough/podtemplate.json diff --git a/examples/walkthrough/replication-controller.yaml b/docs/user-guide/walkthrough/replication-controller.yaml similarity index 100% rename from examples/walkthrough/replication-controller.yaml rename to docs/user-guide/walkthrough/replication-controller.yaml diff --git a/examples/walkthrough/service.yaml b/docs/user-guide/walkthrough/service.yaml similarity index 100% rename from examples/walkthrough/service.yaml rename to docs/user-guide/walkthrough/service.yaml diff --git a/docs/working-with-resources.md b/docs/user-guide/working-with-resources.md similarity index 100% rename from docs/working-with-resources.md rename to docs/user-guide/working-with-resources.md From 3eff8fce41a986e229eec38c1c16b9340ca6339b Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 14 Jul 2015 09:37:37 -0700 Subject: [PATCH 2/3] automated link fixes --- docs/README.md | 2 +- docs/admin/README.md | 6 ++-- docs/admin/accessing-the-api.md | 2 +- docs/admin/admission-controllers.md | 2 +- docs/admin/namespaces.md | 4 +-- docs/admin/networking.md | 6 ++-- docs/admin/node.md | 4 +-- docs/api-conventions.md | 18 +++++------ docs/api.md | 8 ++--- docs/cluster-large.md | 2 +- docs/cluster-troubleshooting.md | 2 +- docs/design/access.md | 2 +- docs/design/architecture.md | 6 ++-- docs/design/networking.md | 2 +- 
docs/design/resources.md | 4 +-- docs/developer-guide.md | 4 +-- docs/getting-started-guides/aws.md | 2 +- docs/getting-started-guides/azure.md | 2 +- .../centos/centos_manual_config.md | 2 +- .../coreos/bare_metal_offline.md | 2 +- docs/getting-started-guides/docker.md | 2 +- .../fedora/fedora_manual_config.md | 2 +- docs/getting-started-guides/gce.md | 8 ++--- docs/getting-started-guides/locally.md | 4 +-- docs/getting-started-guides/rkt/README.md | 2 +- docs/getting-started-guides/scratch.md | 6 ++-- docs/glossary.md | 20 ++++++------- docs/proposals/autoscaling.md | 6 ++-- docs/service-accounts.md | 4 +-- docs/troubleshooting.md | 2 +- docs/user-guide/accessing-the-cluster.md | 24 +++++++-------- docs/user-guide/annotations.md | 2 +- .../user-guide/application-troubleshooting.md | 4 +-- docs/user-guide/compute-resources.md | 8 ++--- docs/user-guide/configuring-containers.md | 4 +-- docs/user-guide/connecting-applications.md | 4 +-- ...connecting-to-applications-port-forward.md | 4 +-- .../connecting-to-applications-proxy.md | 6 ++-- docs/user-guide/container-environment.md | 2 +- docs/user-guide/containers.md | 2 +- docs/user-guide/debugging-services.md | 2 +- docs/user-guide/deploying-applications.md | 6 ++-- docs/user-guide/downward-api.md | 6 ++-- docs/user-guide/downward-api/README.md | 4 +-- docs/user-guide/environment-guide/README.md | 10 +++---- .../environment-guide/containers/README.md | 2 +- docs/user-guide/getting-into-containers.md | 2 +- docs/user-guide/identifiers.md | 4 +-- docs/user-guide/images.md | 4 +-- docs/user-guide/kubeconfig-file.md | 4 +-- docs/user-guide/labels.md | 2 +- docs/user-guide/limitrange/README.md | 4 +-- docs/user-guide/liveness/README.md | 2 +- docs/user-guide/logging-demo/README.md | 8 ++--- docs/user-guide/logging.md | 12 ++++---- docs/user-guide/managing-deployments.md | 4 +-- docs/user-guide/monitoring.md | 8 ++--- docs/user-guide/namespaces.md | 4 +-- docs/user-guide/node-selection/README.md | 4 +-- 
docs/user-guide/overview.md | 12 ++++---- docs/user-guide/persistent-volumes.md | 8 ++--- docs/user-guide/persistent-volumes/README.md | 2 +- docs/user-guide/pod-states.md | 4 +-- docs/user-guide/pods.md | 4 +-- docs/user-guide/prereqs.md | 2 +- docs/user-guide/production-pods.md | 30 +++++++++---------- docs/user-guide/replication-controller.md | 6 ++-- docs/user-guide/resourcequota/README.md | 2 +- docs/user-guide/secrets.md | 16 +++++----- docs/user-guide/secrets/README.md | 4 +-- docs/user-guide/security-context.md | 4 +-- docs/user-guide/services.md | 4 +-- docs/user-guide/sharing-clusters.md | 2 +- docs/user-guide/simple-nginx.md | 8 ++--- docs/user-guide/simple-yaml.md | 4 +-- docs/user-guide/ui.md | 6 ++-- docs/user-guide/update-demo/README.md | 8 ++--- docs/user-guide/user-guide.md | 22 +++++++------- docs/user-guide/volumes.md | 14 ++++----- docs/user-guide/walkthrough/README.md | 10 +++---- docs/user-guide/walkthrough/k8s201.md | 10 +++---- docs/user-guide/working-with-resources.md | 12 ++++---- examples/cassandra/README.md | 6 ++-- examples/cluster-dns/README.md | 8 ++--- examples/elasticsearch/README.md | 8 ++--- examples/guestbook-go/README.md | 4 +-- examples/guestbook/README.md | 6 ++-- examples/hazelcast/README.md | 6 ++-- examples/kubernetes-namespaces/README.md | 6 ++-- examples/meteor/README.md | 8 ++--- examples/mysql-wordpress-pd/README.md | 12 ++++---- examples/phabricator/README.md | 6 ++-- examples/redis/README.md | 6 ++-- examples/rethinkdb/README.md | 2 +- examples/spark/README.md | 6 ++-- examples/storm/README.md | 6 ++-- 96 files changed, 287 insertions(+), 287 deletions(-) diff --git a/docs/README.md b/docs/README.md index dcbca9ab13..4aed36ac8e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,7 +14,7 @@ certainly want the docs that go with that version. 
# Kubernetes Documentation: releases.k8s.io/HEAD -* The [User's guide](user-guide.md) is for anyone who wants to run programs and +* The [User's guide](user-guide/user-guide.md) is for anyone who wants to run programs and services on an existing Kubernetes cluster. * The [Cluster Admin's guide](admin/README.md) is for anyone setting up diff --git a/docs/admin/README.md b/docs/admin/README.md index 158cbd9ca2..0be5d83558 100644 --- a/docs/admin/README.md +++ b/docs/admin/README.md @@ -15,7 +15,7 @@ certainly want the docs that go with that version. # Kubernetes Cluster Admin Guide The cluster admin guide is for anyone creating or administering a Kubernetes cluster. -It assumes some familiarity with concepts in the [User Guide](../user-guide.md). +It assumes some familiarity with concepts in the [User Guide](../user-guide/user-guide.md). ## Planning a cluster @@ -63,7 +63,7 @@ project.](salt.md). * **DNS Integration with SkyDNS** ([dns.md](dns.md)): Resolving a DNS name directly to a Kubernetes service. -* **Logging** with [Kibana](../logging.md) +* **Logging** with [Kibana](../user-guide/logging.md) ## Multi-tenant support @@ -74,7 +74,7 @@ project.](salt.md). ## Security -* **Kubernetes Container Environment** ([docs/container-environment.md](../container-environment.md)): +* **Kubernetes Container Environment** ([docs/user-guide/container-environment.md](../user-guide/container-environment.md)): Describes the environment for Kubelet managed containers on a Kubernetes node. diff --git a/docs/admin/accessing-the-api.md b/docs/admin/accessing-the-api.md index ed1363b409..6598b5d34b 100644 --- a/docs/admin/accessing-the-api.md +++ b/docs/admin/accessing-the-api.md @@ -20,7 +20,7 @@ cluster administrators who want to customize their cluster or understand the details. Most questions about accessing the cluster are covered -in [Accessing the cluster](../accessing-the-cluster.md). +in [Accessing the cluster](../user-guide/accessing-the-cluster.md). 
## Ports and IPs Served On diff --git a/docs/admin/admission-controllers.md b/docs/admin/admission-controllers.md index 5a5f80baa6..40ed32cf8d 100644 --- a/docs/admin/admission-controllers.md +++ b/docs/admin/admission-controllers.md @@ -86,7 +86,7 @@ We strongly recommend using this plug-in if you intend to make use of Kubernetes ### SecurityContextDeny -This plug-in will deny any pod with a [SecurityContext](../security-context.md) that defines options that were not available on the ```Container```. +This plug-in will deny any pod with a [SecurityContext](../user-guide/security-context.md) that defines options that were not available on the ```Container```. ### ResourceQuota diff --git a/docs/admin/namespaces.md b/docs/admin/namespaces.md index 6720026b3b..8a4b5941cf 100644 --- a/docs/admin/namespaces.md +++ b/docs/admin/namespaces.md @@ -14,7 +14,7 @@ certainly want the docs that go with that version. # Namespaces -Namespaces help different projects, teams, or customers to share a kubernetes cluster. First, they provide a scope for [Names](../identifiers.md). Second, as our access control code develops, it is expected that it will be convenient to attach authorization and other policy to namespaces. +Namespaces help different projects, teams, or customers to share a kubernetes cluster. First, they provide a scope for [Names](../user-guide/identifiers.md). Second, as our access control code develops, it is expected that it will be convenient to attach authorization and other policy to namespaces. Use of multiple namespaces is optional. For small teams, they may not be needed. @@ -23,7 +23,7 @@ This is a placeholder document about namespace administration. TODO: document namespace creation, ownership assignment, visibility rules, policy creation, interaction with network. -Namespaces are still under development. For now, the best documentation is the [Namespaces Design Document](../design/namespaces.md). 
The user documentation can be found at [Namespaces](../../docs/namespaces.md) +Namespaces are still under development. For now, the best documentation is the [Namespaces Design Document](../design/namespaces.md). The user documentation can be found at [Namespaces](../../docs/user-guide/namespaces.md) diff --git a/docs/admin/networking.md b/docs/admin/networking.md index b12d49b0e5..0ad1e39eee 100644 --- a/docs/admin/networking.md +++ b/docs/admin/networking.md @@ -34,10 +34,10 @@ certainly want the docs that go with that version. Kubernetes approaches networking somewhat differently than Docker does by default. There are 4 distinct networking problems to solve: 1. Highly-coupled container-to-container communications: this is solved by - [pods](../pods.md) and `localhost` communications. + [pods](../user-guide/pods.md) and `localhost` communications. 2. Pod-to-Pod communications: this is the primary focus of this document. -3. Pod-to-Service communications: this is covered by [services](../services.md). -4. External-to-Service communications: this is covered by [services](../services.md). +3. Pod-to-Service communications: this is covered by [services](../user-guide/services.md). +4. External-to-Service communications: this is covered by [services](../user-guide/services.md). ## Summary diff --git a/docs/admin/node.md b/docs/admin/node.md index 321dfac323..8e9d5f4c0d 100644 --- a/docs/admin/node.md +++ b/docs/admin/node.md @@ -36,7 +36,7 @@ certainly want the docs that go with that version. `Node` is a worker machine in Kubernetes, previously known as `Minion`. Node may be a VM or physical machine, depending on the cluster. Each node has -the services necessary to run [Pods](../pods.md) and be managed from the master +the services necessary to run [Pods](../user-guide/pods.md) and be managed from the master systems. The services include docker, kubelet and network proxy. 
See [The Kubernetes Node](../design/architecture.md#the-kubernetes-node) section in design doc for more details. @@ -101,7 +101,7 @@ The information is gathered by Kubernetes from the node. ## Node Management -Unlike [Pods](../pods.md) and [Services](../services.md), a Node is not inherently +Unlike [Pods](../user-guide/pods.md) and [Services](../user-guide/services.md), a Node is not inherently created by Kubernetes: it is either created from cloud providers like Google Compute Engine, or from your physical or virtual machines. What this means is that when Kubernetes creates a node, it only creates a representation for the node. diff --git a/docs/api-conventions.md b/docs/api-conventions.md index 626c0354ba..fe4c978689 100644 --- a/docs/api-conventions.md +++ b/docs/api-conventions.md @@ -87,7 +87,7 @@ Kinds are grouped into three categories: Most objects defined in the system should have an endpoint that returns the full set of resources, as well as zero or more endpoints that return subsets of the full list. Some objects may be singletons (the current user, the system defaults) and may not have lists. - In addition, all lists that return objects with labels should support label filtering (see [labels.md](labels.md), and most lists should support filtering by fields. + In addition, all lists that return objects with labels should support label filtering (see [user-guide/labels.md](user-guide/labels.md), and most lists should support filtering by fields. Examples: PodLists, ServiceLists, NodeLists @@ -120,17 +120,17 @@ These fields are required for proper decoding of the object. They may be populat Every object kind MUST have the following metadata in a nested object field called "metadata": -* namespace: a namespace is a DNS compatible subdomain that objects are subdivided into. The default namespace is 'default'. See [namespaces.md](namespaces.md) for more. 
-* name: a string that uniquely identifies this object within the current namespace (see [identifiers.md](identifiers.md)). This value is used in the path when retrieving an individual object. -* uid: a unique in time and space value (typically an RFC 4122 generated identifier, see [identifiers.md](identifiers.md)) used to distinguish between objects with the same name that have been deleted and recreated +* namespace: a namespace is a DNS compatible subdomain that objects are subdivided into. The default namespace is 'default'. See [admin/namespaces.md](admin/namespaces.md) for more. +* name: a string that uniquely identifies this object within the current namespace (see [user-guide/identifiers.md](user-guide/identifiers.md)). This value is used in the path when retrieving an individual object. +* uid: a unique in time and space value (typically an RFC 4122 generated identifier, see [user-guide/identifiers.md](user-guide/identifiers.md)) used to distinguish between objects with the same name that have been deleted and recreated Every object SHOULD have the following metadata in a nested object field called "metadata": * resourceVersion: a string that identifies the internal version of this object that can be used by clients to determine when objects have changed. This value MUST be treated as opaque by clients and passed unmodified back to the server. Clients should not assume that the resource version has meaning across namespaces, different kinds of resources, or different servers. (see [concurrency control](#concurrency-control-and-consistency), below, for more details) * creationTimestamp: a string representing an RFC 3339 date of the date and time an object was created * deletionTimestamp: a string representing an RFC 3339 date of the date and time after which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. 
The resource will be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. -* labels: a map of string keys and values that can be used to organize and categorize objects (see [labels.md](labels.md)) -* annotations: a map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about this object (see [annotations.md](annotations.md)) +* labels: a map of string keys and values that can be used to organize and categorize objects (see [user-guide/labels.md](user-guide/labels.md)) +* annotations: a map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about this object (see [user-guide/annotations.md](user-guide/annotations.md)) Labels are intended for organizational purposes by end users (select the pods that match this label query). Annotations enable third-party automation and tooling to decorate objects with additional metadata for their own use. @@ -167,7 +167,7 @@ Status information that may be large (especially unbounded in size, such as list #### References to related objects -References to loosely coupled sets of objects, such as [pods](pods.md) overseen by a [replication controller](replication-controller.md), are usually best referred to using a [label selector](labels.md). In order to ensure that GETs of individual objects remain bounded in time and space, these sets may be queried via separate API queries, but will not be expanded in the referring object's status. +References to loosely coupled sets of objects, such as [pods](user-guide/pods.md) overseen by a [replication controller](user-guide/replication-controller.md), are usually best referred to using a [label selector](user-guide/labels.md). 
In order to ensure that GETs of individual objects remain bounded in time and space, these sets may be queried via separate API queries, but will not be expanded in the referring object's status. References to specific objects, especially specific resource versions and/or specific fields of those objects, are specified using the `ObjectReference` type. Unlike partial URLs, the ObjectReference type facilitates flexible defaulting of fields from the referring object or other contextual information. @@ -234,7 +234,7 @@ Kubernetes by convention exposes additional verbs as new root endpoints with sin These are verbs which change the fundamental type of data returned (watch returns a stream of JSON instead of a single JSON object). Support of additional verbs is not required for all object types. -Two additional verbs `redirect` and `proxy` provide access to cluster resources as described in [accessing-the-cluster.md](accessing-the-cluster.md). +Two additional verbs `redirect` and `proxy` provide access to cluster resources as described in [user-guide/accessing-the-cluster.md](user-guide/accessing-the-cluster.md). When resources wish to expose alternative actions that are closely coupled to a single resource, they should do so using new sub-resources. An example is allowing automated processes to update the "status" field of a Pod. The `/pods` endpoint only allows updates to "metadata" and "spec", since those reflect end-user intent. An automated process should be able to modify status for users to see by sending an updated Pod kind to the server to the "/pods/<name>/status" endpoint - the alternate endpoint allows different rules to be applied to the update, and access to be appropriately restricted. Likewise, some actions like "stop" or "scale" are best represented as REST sub-resources that are POSTed to. The POST action may require a simple kind to be provided if the action requires parameters, or function without a request body. 
@@ -324,7 +324,7 @@ labels: ## Idempotency -All compatible Kubernetes APIs MUST support "name idempotency" and respond with an HTTP status code 409 when a request is made to POST an object that has the same name as an existing object in the system. See [identifiers.md](identifiers.md) for details. +All compatible Kubernetes APIs MUST support "name idempotency" and respond with an HTTP status code 409 when a request is made to POST an object that has the same name as an existing object in the system. See [user-guide/identifiers.md](user-guide/identifiers.md) for details. Names generated by the system may be requested using `metadata.generateName`. GenerateName indicates that the name should be made unique by the server prior to persisting it. A non-empty value for the field indicates the name will be made unique (and the name returned to the client will be different than the name passed). The value of this field will be combined with a unique suffix on the server if the Name field has not been provided. The provided value must be valid within the rules for Name, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified, and Name is not present, the server will NOT return a 409 if the generated name exists - instead, it will either return 201 Created or 504 with Reason `ServerTimeout` indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). diff --git a/docs/api.md b/docs/api.md index c44046e07a..778ff593be 100644 --- a/docs/api.md +++ b/docs/api.md @@ -14,7 +14,7 @@ certainly want the docs that go with that version. # The Kubernetes API -Primary system and API concepts are documented in the [User guide](user-guide.md). +Primary system and API concepts are documented in the [User guide](user-guide/user-guide.md). Overall API conventions are described in the [API conventions doc](api-conventions.md). 
@@ -54,11 +54,11 @@ Changes to services are the most significant difference between v1beta3 and v1. * The `service.spec.portalIP` property is renamed to `service.spec.clusterIP`. * The `service.spec.createExternalLoadBalancer` property is removed. Specify `service.spec.type: "LoadBalancer"` to create an external load balancer instead. -* The `service.spec.publicIPs` property is deprecated and now called `service.spec.deprecatedPublicIPs`. This property will be removed entirely when v1beta3 is removed. The vast majority of users of this field were using it to expose services on ports on the node. Those users should specify `service.spec.type: "NodePort"` instead. Read [External Services](services.md#external-services) for more info. If this is not sufficient for your use case, please file an issue or contact @thockin. +* The `service.spec.publicIPs` property is deprecated and now called `service.spec.deprecatedPublicIPs`. This property will be removed entirely when v1beta3 is removed. The vast majority of users of this field were using it to expose services on ports on the node. Those users should specify `service.spec.type: "NodePort"` instead. Read [External Services](user-guide/services.md#external-services) for more info. If this is not sufficient for your use case, please file an issue or contact @thockin. Some other difference between v1beta3 and v1: -* The `pod.spec.containers[*].privileged` and `pod.spec.containers[*].capabilities` properties are now nested under the `pod.spec.containers[*].securityContext` property. See [Security Contexts](security-context.md). +* The `pod.spec.containers[*].privileged` and `pod.spec.containers[*].capabilities` properties are now nested under the `pod.spec.containers[*].securityContext` property. See [Security Contexts](user-guide/security-context.md). * The `pod.spec.host` property is renamed to `pod.spec.nodeName`. * The `endpoints.subsets[*].addresses.IP` property is renamed to `endpoints.subsets[*].addresses.ip`. 
* The `pod.status.containerStatuses[*].state.termination` and `pod.status.containerStatuses[*].lastState.termination` properties are renamed to `pod.status.containerStatuses[*].state.terminated` and `pod.status.containerStatuses[*].lastState.terminated` respectively. @@ -79,7 +79,7 @@ Some important differences between v1beta1/2 and v1beta3: * The `labels` query parameter has been renamed to `labelSelector`. * The `fields` query parameter has been renamed to `fieldSelector`. * The container `entrypoint` has been renamed to `command`, and `command` has been renamed to `args`. -* Container, volume, and node resources are expressed as nested maps (e.g., `resources{cpu:1}`) rather than as individual fields, and resource values support [scaling suffixes](compute-resources.md#specifying-resource-quantities) rather than fixed scales (e.g., milli-cores). +* Container, volume, and node resources are expressed as nested maps (e.g., `resources{cpu:1}`) rather than as individual fields, and resource values support [scaling suffixes](user-guide/compute-resources.md#specifying-resource-quantities) rather than fixed scales (e.g., milli-cores). * Restart policy is represented simply as a string (e.g., `"Always"`) rather than as a nested map (`always{}`). * Pull policies changed from `PullAlways`, `PullNever`, and `PullIfNotPresent` to `Always`, `Never`, and `IfNotPresent`. * The volume `source` is inlined into `volume` rather than nested. 
diff --git a/docs/cluster-large.md b/docs/cluster-large.md index b383e7cb59..bb5df665dc 100644 --- a/docs/cluster-large.md +++ b/docs/cluster-large.md @@ -68,7 +68,7 @@ To avoid running into cluster addon resource issues, when creating a cluster wit * [FluentD with ElasticSearch Plugin](../cluster/saltbase/salt/fluentd-es/fluentd-es.yaml) * [FluentD with GCP Plugin](../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml) -For directions on how to detect if addon containers are hitting resource limits, see the [Troubleshooting section of Compute Resources](compute-resources.md#troubleshooting). +For directions on how to detect if addon containers are hitting resource limits, see the [Troubleshooting section of Compute Resources](user-guide/compute-resources.md#troubleshooting). diff --git a/docs/cluster-troubleshooting.md b/docs/cluster-troubleshooting.md index 17df092ebf..d9690c9efb 100644 --- a/docs/cluster-troubleshooting.md +++ b/docs/cluster-troubleshooting.md @@ -14,7 +14,7 @@ certainly want the docs that go with that version. # Cluster Troubleshooting Most of the time, if you encounter problems, it is your application that is having problems. For application -problems please see the [application troubleshooting guide](application-troubleshooting.md). +problems please see the [application troubleshooting guide](user-guide/application-troubleshooting.md). ## Listing your cluster The first thing to debug in your cluster is if your nodes are all registered correctly. diff --git a/docs/design/access.md b/docs/design/access.md index 912f93aa62..6192792d9b 100644 --- a/docs/design/access.md +++ b/docs/design/access.md @@ -165,7 +165,7 @@ In the Simple Profile: Namespaces versus userAccount vs Labels: - `userAccount`s are intended for audit logging (both name and UID should be logged), and to define who has access to `namespace`s. 
-- `labels` (see [docs/labels.md](../../docs/labels.md)) should be used to distinguish pods, users, and other objects that cooperate towards a common goal but are different in some way, such as version, or responsibilities. +- `labels` (see [docs/user-guide/labels.md](../../docs/user-guide/labels.md)) should be used to distinguish pods, users, and other objects that cooperate towards a common goal but are different in some way, such as version, or responsibilities. - `namespace`s prevent name collisions between uncoordinated groups of people, and provide a place to attach common policies for co-operating groups of people. diff --git a/docs/design/architecture.md b/docs/design/architecture.md index 22d61b270e..d2f9d942e4 100644 --- a/docs/design/architecture.md +++ b/docs/design/architecture.md @@ -27,11 +27,11 @@ The Kubernetes node has the services necessary to run application containers and Each node runs Docker, of course. Docker takes care of the details of downloading images and running containers. ### Kubelet -The **Kubelet** manages [pods](../pods.md) and their containers, their images, their volumes, etc. +The **Kubelet** manages [pods](../user-guide/pods.md) and their containers, their images, their volumes, etc. ### Kube-Proxy -Each node also runs a simple network proxy and load balancer (see the [services FAQ](https://github.com/GoogleCloudPlatform/kubernetes/wiki/Services-FAQ) for more details). This reflects `services` (see [the services doc](../services.md) for more details) as defined in the Kubernetes API on each node and can do simple TCP and UDP stream forwarding (round robin) across a set of backends. +Each node also runs a simple network proxy and load balancer (see the [services FAQ](https://github.com/GoogleCloudPlatform/kubernetes/wiki/Services-FAQ) for more details). 
This reflects `services` (see [the services doc](../user-guide/services.md) for more details) as defined in the Kubernetes API on each node and can do simple TCP and UDP stream forwarding (round robin) across a set of backends. Service endpoints are currently found via [DNS](../admin/dns.md) or through environment variables (both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) and Kubernetes {FOO}_SERVICE_HOST and {FOO}_SERVICE_PORT variables are supported). These variables resolve to ports managed by the service proxy. @@ -55,7 +55,7 @@ The scheduler binds unscheduled pods to nodes via the `/binding` API. The schedu All other cluster-level functions are currently performed by the Controller Manager. For instance, `Endpoints` objects are created and updated by the endpoints controller, and nodes are discovered, managed, and monitored by the node controller. These could eventually be split into separate components to make them independently pluggable. -The [`replicationcontroller`](../replication-controller.md) is a mechanism that is layered on top of the simple [`pod`](../pods.md) API. We eventually plan to port it to a generic plug-in mechanism, once one is implemented. +The [`replicationcontroller`](../user-guide/replication-controller.md) is a mechanism that is layered on top of the simple [`pod`](../user-guide/pods.md) API. We eventually plan to port it to a generic plug-in mechanism, once one is implemented. diff --git a/docs/design/networking.md b/docs/design/networking.md index 1ebc3d472e..c13daa1bac 100644 --- a/docs/design/networking.md +++ b/docs/design/networking.md @@ -140,7 +140,7 @@ to serve the purpose outside of GCE. ## Pod to service -The [service](../services.md) abstraction provides a way to group pods under a +The [service](../user-guide/services.md) abstraction provides a way to group pods under a common access policy (e.g. load-balanced). 
The implementation of this creates a virtual IP which clients can access and which is transparently proxied to the pods in a Service. Each node runs a kube-proxy process which programs diff --git a/docs/design/resources.md b/docs/design/resources.md index 437aac09d0..fb147fa5be 100644 --- a/docs/design/resources.md +++ b/docs/design/resources.md @@ -13,7 +13,7 @@ certainly want the docs that go with that version. **Note: this is a design doc, which describes features that have not been completely implemented. -User documentation of the current state is [here](../compute-resources.md). The tracking issue for +User documentation of the current state is [here](../user-guide/compute-resources.md). The tracking issue for implementation of this model is [#168](https://github.com/GoogleCloudPlatform/kubernetes/issues/168). Currently, only memory and cpu limits on containers (not pods) are supported. "memory" is in bytes and "cpu" is in @@ -163,7 +163,7 @@ The following are planned future extensions to the resource model, included here ## Usage data -Because resource usage and related metrics change continuously, need to be tracked over time (i.e., historically), can be characterized in a variety of ways, and are fairly voluminous, we will not include usage in core API objects, such as [Pods](../pods.md) and Nodes, but will provide separate APIs for accessing and managing that data. See the Appendix for possible representations of usage data, but the representation we'll use is TBD. +Because resource usage and related metrics change continuously, need to be tracked over time (i.e., historically), can be characterized in a variety of ways, and are fairly voluminous, we will not include usage in core API objects, such as [Pods](../user-guide/pods.md) and Nodes, but will provide separate APIs for accessing and managing that data. See the Appendix for possible representations of usage data, but the representation we'll use is TBD. 
Singleton values for observed and predicted future usage will rapidly prove inadequate, so we will support the following structure for extended usage information: diff --git a/docs/developer-guide.md b/docs/developer-guide.md index 880ac4b813..84d6b6011f 100644 --- a/docs/developer-guide.md +++ b/docs/developer-guide.md @@ -16,7 +16,7 @@ certainly want the docs that go with that version. The developer guide is for anyone wanting to either write code which directly accesses the kubernetes API, or to contribute directly to the kubernetes project. -It assumes some familiarity with concepts in the [User Guide](user-guide.md) and the [Cluster Admin +It assumes some familiarity with concepts in the [User Guide](user-guide/user-guide.md) and the [Cluster Admin Guide](admin/README.md). @@ -24,7 +24,7 @@ Guide](admin/README.md). * API objects are explained at [http://kubernetes.io/third_party/swagger-ui/](http://kubernetes.io/third_party/swagger-ui/). -* **Annotations** ([annotations.md](annotations.md)): are for attaching arbitrary non-identifying metadata to objects. +* **Annotations** ([user-guide/annotations.md](user-guide/annotations.md)): are for attaching arbitrary non-identifying metadata to objects. Programs that automate Kubernetes objects may use annotations to store small amounts of their state. * **API Conventions** ([api-conventions.md](api-conventions.md)): diff --git a/docs/getting-started-guides/aws.md b/docs/getting-started-guides/aws.md index af04aa5b94..a5aa228342 100644 --- a/docs/getting-started-guides/aws.md +++ b/docs/getting-started-guides/aws.md @@ -91,7 +91,7 @@ By default, `kubectl` will use the `kubeconfig` file generated during the cluste For more information, please read [kubeconfig files](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/kubeconfig-file.md) ### Examples -See [a simple nginx example](../../examples/simple-nginx.md) to try out your new cluster. 
+See [a simple nginx example](../../docs/user-guide/simple-nginx.md) to try out your new cluster. The "Guestbook" application is another popular example to get started with Kubernetes: [guestbook example](../../examples/guestbook/) diff --git a/docs/getting-started-guides/azure.md b/docs/getting-started-guides/azure.md index c975d6828e..46d3e15d3d 100644 --- a/docs/getting-started-guides/azure.md +++ b/docs/getting-started-guides/azure.md @@ -63,7 +63,7 @@ The script above will start (by default) a single master VM along with 4 worker can tweak some of these parameters by editing `cluster/azure/config-default.sh`. ## Getting started with your cluster -See [a simple nginx example](../../examples/simple-nginx.md) to try out your new cluster. +See [a simple nginx example](../user-guide/simple-nginx.md) to try out your new cluster. For more complete applications, please look in the [examples directory](../../examples/). diff --git a/docs/getting-started-guides/centos/centos_manual_config.md b/docs/getting-started-guides/centos/centos_manual_config.md index ecf6aad0fa..1ea4e04d8f 100644 --- a/docs/getting-started-guides/centos/centos_manual_config.md +++ b/docs/getting-started-guides/centos/centos_manual_config.md @@ -177,7 +177,7 @@ centos-minion Ready **The cluster should be running! Launch a test pod.** -You should have a functional cluster, check out [101](../../../examples/walkthrough/README.md)! +You should have a functional cluster, check out [101](../../../docs/user-guide/walkthrough/README.md)! diff --git a/docs/getting-started-guides/coreos/bare_metal_offline.md b/docs/getting-started-guides/coreos/bare_metal_offline.md index 801ae680cf..aa540bcd03 100644 --- a/docs/getting-started-guides/coreos/bare_metal_offline.md +++ b/docs/getting-started-guides/coreos/bare_metal_offline.md @@ -634,7 +634,7 @@ Reboot these servers to get the images PXEd and ready for running containers! 
## Creating test pod Now that the CoreOS with Kubernetes installed is up and running lets spin up some Kubernetes pods to demonstrate the system. -See [a simple nginx example](../../../examples/simple-nginx.md) to try out your new cluster. +See [a simple nginx example](../../../docs/user-guide/simple-nginx.md) to try out your new cluster. For more complete applications, please look in the [examples directory](../../../examples/). diff --git a/docs/getting-started-guides/docker.md b/docs/getting-started-guides/docker.md index 837a0e4888..bde5668906 100644 --- a/docs/getting-started-guides/docker.md +++ b/docs/getting-started-guides/docker.md @@ -47,7 +47,7 @@ docker run --net=host -d gcr.io/google_containers/etcd:2.0.9 /usr/local/bin/etcd docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v0.21.2 /hyperkube kubelet --api_servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=127.0.0.1 --config=/etc/kubernetes/manifests ``` -This actually runs the kubelet, which in turn runs a [pod](../pods.md) that contains the other master components. +This actually runs the kubelet, which in turn runs a [pod](../user-guide/pods.md) that contains the other master components. ### Step Three: Run the service proxy *Note, this could be combined with master above, but it requires --privileged for iptables manipulation* diff --git a/docs/getting-started-guides/fedora/fedora_manual_config.md b/docs/getting-started-guides/fedora/fedora_manual_config.md index 0a4c5957d6..834cafd66c 100644 --- a/docs/getting-started-guides/fedora/fedora_manual_config.md +++ b/docs/getting-started-guides/fedora/fedora_manual_config.md @@ -204,7 +204,7 @@ $ kubectl delete -f node.json **The cluster should be running! Launch a test pod.** -You should have a functional cluster, check out [101](../../../examples/walkthrough/README.md)! 
+You should have a functional cluster, check out [101](../../../docs/user-guide/walkthrough/README.md)! diff --git a/docs/getting-started-guides/gce.md b/docs/getting-started-guides/gce.md index f75f9b90e6..d7cd1e6032 100644 --- a/docs/getting-started-guides/gce.md +++ b/docs/getting-started-guides/gce.md @@ -68,7 +68,7 @@ wget -q -O - https://get.k8s.io | bash Once this command completes, you will have a master VM and four worker VMs, running as a Kubernetes cluster. -By default, some containers will already be running on your cluster. Containers like `kibana` and `elasticsearch` provide [logging](../logging.md), while `heapster` provides [monitoring](../../cluster/addons/cluster-monitoring/README.md) services. +By default, some containers will already be running on your cluster. Containers like `kibana` and `elasticsearch` provide [logging](../user-guide/logging.md), while `heapster` provides [monitoring](../../cluster/addons/cluster-monitoring/README.md) services. The script run by the commands above creates a cluster with the name/prefix "kubernetes". It defines one specific cluster config, so you can't run it more than once. @@ -123,7 +123,7 @@ Once `kubectl` is in your path, you can use it to look at your cluster. E.g., ru ``` $ kubectl get --all-namespaces services ``` -should show a set of [services](../services.md) that look something like this: +should show a set of [services](../user-guide/services.md) that look something like this: ```shell NAMESPACE NAME LABELS SELECTOR IP(S) PORT(S) @@ -136,7 +136,7 @@ kube-system monitoring-heapster kubernetes.io/cluster-service=true,kubernete kube-system monitoring-influxdb kubernetes.io/cluster-service=true,kubernetes.io/name=InfluxDB k8s-app=influxGrafana 10.0.210.156 8083/TCP 8086/TCP ``` -Similarly, you can take a look at the set of [pods](../pods.md) that were created during cluster startup. +Similarly, you can take a look at the set of [pods](../user-guide/pods.md) that were created during cluster startup. 
You can do this via the ```shell @@ -162,7 +162,7 @@ Some of the pods may take a few seconds to start up (during this time they'll sh #### Run some examples -Then, see [a simple nginx example](../../examples/simple-nginx.md) to try out your new cluster. +Then, see [a simple nginx example](../../docs/user-guide/simple-nginx.md) to try out your new cluster. For more complete applications, please look in the [examples directory](../../examples/). The [guestbook example](../../examples/guestbook/) is a good "getting started" walkthrough. diff --git a/docs/getting-started-guides/locally.md b/docs/getting-started-guides/locally.md index f038fffca2..28e038c285 100644 --- a/docs/getting-started-guides/locally.md +++ b/docs/getting-started-guides/locally.md @@ -99,8 +99,8 @@ cluster/kubectl.sh get replicationcontrollers ### Running a user defined pod -Note the difference between a [container](../containers.md) -and a [pod](../pods.md). Since you only asked for the former, kubernetes will create a wrapper pod for you. +Note the difference between a [container](../user-guide/containers.md) +and a [pod](../user-guide/pods.md). Since you only asked for the former, kubernetes will create a wrapper pod for you. However you cannot view the nginx start page on localhost. To verify that nginx is running you need to run `curl` within the docker container (try `docker exec`). You can control the specifications of a pod via a user defined manifest, and reach nginx through your browser on the port specified therein: diff --git a/docs/getting-started-guides/rkt/README.md b/docs/getting-started-guides/rkt/README.md index af16112a56..22cf1b9c4a 100644 --- a/docs/getting-started-guides/rkt/README.md +++ b/docs/getting-started-guides/rkt/README.md @@ -98,7 +98,7 @@ Note: CoreOS is not supported as the master using the automated launch scripts. The master node is always Ubuntu. 
### Getting started with your cluster -See [a simple nginx example](../../../examples/simple-nginx.md) to try out your new cluster. +See [a simple nginx example](../../../docs/user-guide/simple-nginx.md) to try out your new cluster. For more complete applications, please look in the [examples directory](../../../examples/). diff --git a/docs/getting-started-guides/scratch.md b/docs/getting-started-guides/scratch.md index 3d4e6d40c3..a3380096db 100644 --- a/docs/getting-started-guides/scratch.md +++ b/docs/getting-started-guides/scratch.md @@ -56,7 +56,7 @@ steps that existing cluster setup scripts are making. ### Learning 1. You should be familiar with using Kubernetes already. We suggest you set up a temporary cluster by following one of the other Getting Started Guides. - This will help you become familiar with the CLI ([kubectl](../user-guide/kubectl/kubectl.md)) and concepts ([pods](../pods.md), [services](../services.md), etc.) first. + This will help you become familiar with the CLI ([kubectl](../user-guide/kubectl/kubectl.md)) and concepts ([pods](../user-guide/pods.md), [services](../user-guide/services.md), etc.) first. 1. You should have `kubectl` installed on your desktop. This will happen as a side effect of completing one of the other Getting Started Guides. @@ -124,7 +124,7 @@ You need to select an address range for the Pod IPs. using `10.10.0.0/24` through `10.10.255.0/24`, respectively. - Need to make these routable or connect with overlay. -Kubernetes also allocates an IP to each [service](../services.md). However, +Kubernetes also allocates an IP to each [service](../user-guide/services.md). However, service IPs do not necessarily need to be routable. The kube-proxy takes care of translating Service IPs to Pod IPs before traffic leaves the node. You do need to Allocate a block of IPs for services. Call this @@ -255,7 +255,7 @@ to read. This guide uses `/var/lib/kube-apiserver/known_tokens.csv`. 
The format for this file is described in the [authentication documentation](../admin/authentication.md). For distributing credentials to clients, the convention in Kubernetes is to put the credentials -into a [kubeconfig file](../kubeconfig-file.md). +into a [kubeconfig file](../user-guide/kubeconfig-file.md). The kubeconfig file for the administrator can be created as follows: - If you have already used Kubernetes with a non-custom cluster (for example, used a Getting Started diff --git a/docs/glossary.md b/docs/glossary.md index b485d6adad..456229293c 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -25,14 +25,14 @@ to add sophisticated authorization, and to make it pluggable. See the [access c non-identifying metadata associated with an object, such as provenance information. Not indexed. **Image** -: A [Docker Image](https://docs.docker.com/userguide/dockerimages/). See [images](images.md). +: A [Docker Image](https://docs.docker.com/userguide/dockerimages/). See [images](user-guide/images.md). **Label** : A key/value pair conveying user-defined identifying attributes of an object, and used to form sets of related objects, such as -pods which are replicas in a load-balanced service. Not intended to hold large or non-human-readable data. See [labels](labels.md). +pods which are replicas in a load-balanced service. Not intended to hold large or non-human-readable data. See [labels](user-guide/labels.md). **Name** -: A user-provided name for an object. See [identifiers](identifiers.md). +: A user-provided name for an object. See [identifiers](user-guide/identifiers.md). **Namespace** : A namespace is like a prefix to the name of an object. You can configure your client to use a particular namespace, @@ -40,33 +40,33 @@ so you do not have to type it all the time. 
Namespaces allow multiple projects t **Pod** : A collection of containers which will be scheduled onto the same node, which share and an IP and port space, and which -can be created/destroyed together. See [pods](pods.md). +can be created/destroyed together. See [pods](user-guide/pods.md). **Replication Controller** : A _replication controller_ ensures that a specified number of pod "replicas" are running at any one time. Both allows for easy scaling of replicated systems, and handles restarting of a Pod when the machine it is on reboots or otherwise fails. **Resource** -: CPU, memory, and other things that a pod can request. See [compute resources](compute-resources.md). +: CPU, memory, and other things that a pod can request. See [compute resources](user-guide/compute-resources.md). **Secret** -: An object containing sensitive information, such as authentication tokens, which can be made available to containers upon request. See [secrets](secrets.md). +: An object containing sensitive information, such as authentication tokens, which can be made available to containers upon request. See [secrets](user-guide/secrets.md). **Selector** : An expression that matches Labels. Can identify related objects, such as pods which are replicas in a load-balanced -service. See [labels](labels.md). +service. See [labels](user-guide/labels.md). **Service** -: A load-balanced set of `pods` which can be accessed via a single stable IP address. See [services](services.md). +: A load-balanced set of `pods` which can be accessed via a single stable IP address. See [services](user-guide/services.md). **UID** : An identifier on all Kubernetes objects that is set by the Kubernetes API server. Can be used to distinguish between historical -occurrences of same-Name objects. See [identifiers](identifiers.md). +occurrences of same-Name objects. See [identifiers](user-guide/identifiers.md). 
**Volume** : A directory, possibly with some data in it, which is accessible to a Container as part of its filesystem. Kubernetes Volumes build upon [Docker Volumes](https://docs.docker.com/userguide/dockervolumes/), adding provisioning of the Volume -directory and/or device. See [volumes](volumes.md). +directory and/or device. See [volumes](user-guide/volumes.md). diff --git a/docs/proposals/autoscaling.md b/docs/proposals/autoscaling.md index e56a22566a..9b6e83b2b1 100644 --- a/docs/proposals/autoscaling.md +++ b/docs/proposals/autoscaling.md @@ -35,7 +35,7 @@ done automatically based on statistical analysis and thresholds. * This proposal is for horizontal scaling only. Vertical scaling will be handled in [issue 2072](https://github.com/GoogleCloudPlatform/kubernetes/issues/2072) * `ReplicationControllers` will not know about the auto-scaler, they are the target of the auto-scaler. The `ReplicationController` responsibilities are -constrained to only ensuring that the desired number of pods are operational per the [Replication Controller Design](../replication-controller.md#responsibilities-of-the-replication-controller) +constrained to only ensuring that the desired number of pods are operational per the [Replication Controller Design](../user-guide/replication-controller.md#responsibilities-of-the-replication-controller) * Auto-scalers will be loosely coupled with data gathering components in order to allow a wide variety of input sources * Auto-scalable resources will support a scale verb ([1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629)) such that the auto-scaler does not directly manipulate the underlying resource. @@ -56,7 +56,7 @@ applications will expose one or more network endpoints for clients to connect to balanced or situated behind a proxy - the data from those proxies and load balancers can be used to estimate client to server traffic for applications. This is the primary, but not sole, source of data for making decisions. 
-Within Kubernetes a [kube proxy](../services.md#ips-and-vips) +Within Kubernetes a [kube proxy](../user-guide/services.md#ips-and-vips) running on each node directs service requests to the underlying implementation. While the proxy provides internal inter-pod connections, there will be L3 and L7 proxies and load balancers that manage @@ -239,7 +239,7 @@ or down as appropriate. In the future this may be more configurable. ### Interactions with a deployment -In a deployment it is likely that multiple replication controllers must be monitored. For instance, in a [rolling deployment](../replication-controller.md#rolling-updates) +In a deployment it is likely that multiple replication controllers must be monitored. For instance, in a [rolling deployment](../user-guide/replication-controller.md#rolling-updates) there will be multiple replication controllers, with one scaling up and another scaling down. This means that an auto-scaler must be aware of the entire set of capacity that backs a service so it does not fight with the deployer. `AutoScalerSpec.MonitorSelector` is what provides this ability. By using a selector that spans the entire service the auto-scaler can monitor capacity diff --git a/docs/service-accounts.md b/docs/service-accounts.md index a7e6557bc4..42816bdb75 100644 --- a/docs/service-accounts.md +++ b/docs/service-accounts.md @@ -37,10 +37,10 @@ When you create a pod, you do not need to specify a service account. It is automatically assigned the `default` service account of the same namespace. If you get the raw json or yaml for a pod you have created (e.g. `kubectl get pods/podname -o yaml`), you can see the `spec.serviceAccount` field has been -[automatically set](working-with-resources.md#resources-are-automatically-modified). +[automatically set](user-guide/working-with-resources.md#resources-are-automatically-modified). 
You can access the API using a proxy or with a client library, as described in -[Accessing the Cluster](accessing-the-cluster.md#accessing-the-api-from-a-pod). +[Accessing the Cluster](user-guide/accessing-the-cluster.md#accessing-the-api-from-a-pod). ## Using Multiple Service Accounts diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 333b1579eb..52fe0b8e0e 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -14,7 +14,7 @@ certainly want the docs that go with that version. # Troubleshooting Sometimes things go wrong. This guide is aimed at making them right. It has two sections: - * [Troubleshooting your application](application-troubleshooting.md) - Useful for users who are deploying code into Kubernetes and wondering why it is not working. + * [Troubleshooting your application](user-guide/application-troubleshooting.md) - Useful for users who are deploying code into Kubernetes and wondering why it is not working. * [Troubleshooting your cluster](cluster-troubleshooting.md) - Useful for cluster administrators and people whose Kubernetes cluster is unhappy. diff --git a/docs/user-guide/accessing-the-cluster.md b/docs/user-guide/accessing-the-cluster.md index 8fec6a014d..c7bdc8caa4 100644 --- a/docs/user-guide/accessing-the-cluster.md +++ b/docs/user-guide/accessing-the-cluster.md @@ -42,7 +42,7 @@ kubernetes CLI, `kubectl`. To access a cluster, you need to know the location of the cluster and have credentials to access it. Typically, this is automatically set-up when you work through -though a [Getting started guide](getting-started-guides/README.md), +though a [Getting started guide](../getting-started-guides/README.md), or someone else setup the cluster and provided you with credentials and a location. 
Check the location and credentials that kubectl knows about with this command: @@ -50,8 +50,8 @@ Check the location and credentials that kubectl knows about with this command: kubectl config view ``` -Many of the [examples](../examples/) provide an introduction to using -kubectl and complete documentation is found in the [kubectl manual](user-guide/kubectl/kubectl.md). +Many of the [examples](../../examples/) provide an introduction to using +kubectl and complete documentation is found in the [kubectl manual](kubectl/kubectl.md). ### Directly accessing the REST API Kubectl handles locating and authenticating to the apiserver. @@ -76,7 +76,7 @@ Run it like this: ``` kubectl proxy --port=8080 & ``` -See [kubectl proxy](user-guide/kubectl/kubectl_proxy.md) for more details. +See [kubectl proxy](kubectl/kubectl_proxy.md) for more details. Then you can explore the API with curl, wget, or a browser, like so: ``` @@ -110,13 +110,13 @@ certificate. On some clusters, the apiserver does not require authentication; it may serve on localhost, or be protected by a firewall. There is not a standard -for this. [Configuring Access to the API](admin/accessing-the-api.md) +for this. [Configuring Access to the API](../admin/accessing-the-api.md) describes how a cluster admin can configure this. Such approaches may conflict with future high-availability support. ### Programmatic access to the API -There are [client libraries](client-libraries.md) for accessing the API +There are [client libraries](../client-libraries.md) for accessing the API from several languages. The Kubernetes project-supported [Go](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/pkg/client) client library can use the same [kubeconfig file](kubeconfig-file.md) @@ -134,7 +134,7 @@ the `kubernetes` DNS name, which resolves to a Service IP which in turn will be routed to an apiserver. The recommended way to authenticate to the apiserver is with a -[service account](service-accounts.md) credential. 
By default, a pod +[service account](../service-accounts.md) credential. By default, a pod is associated with a service account, and a credential (token) for that service account is placed into the filesystem tree of each container in that pod, at `/var/run/secrets/kubernetes.io/serviceaccount/token`. @@ -144,7 +144,7 @@ From within a pod the recommended ways to connect to API are: process within a container. This proxies the kubernetes API to the localhost interface of the pod, so that other processes in any container of the pod can access it. See this [example of using kubectl proxy - in a pod](../examples/kubectl-container/). + in a pod](../../examples/kubectl-container/). - use the Go client library, and create a client using the `client.NewInCluster()` factory. This handles locating and authenticating to the apiserver. In each case, the credentials of the pod are used to communicate securely with the apiserver. @@ -153,7 +153,7 @@ In each case, the credentials of the pod are used to communicate securely with t ## Accessing services running on the cluster The previous section was about connecting the Kubernetes API server. This section is about connecting to other services running on Kubernetes cluster. In kubernetes, the -[nodes](admin/node.md), [pods](pods.md) and [services](services.md) all have +[nodes](../admin/node.md), [pods](pods.md) and [services](services.md) all have their own IPs. In many cases, the node IPs, pod IPs, and some service IPs on a cluster will not be routable, so they will not be reachable from a machine outside the cluster, such as your desktop machine. @@ -163,7 +163,7 @@ You have several options for connecting to nodes, pods and services from outside - Access services through public IPs. - Use a service with type `NodePort` or `LoadBalancer` to make the service reachable outside the cluster. See the [services](services.md) and - [kubectl expose](user-guide/kubectl/kubectl_expose.md) documentation. 
+ [kubectl expose](kubectl/kubectl_expose.md) documentation. - Depending on your cluster environment, this may just expose the service to your corporate network, or it may expose it to the internet. Think about whether the service being exposed is secure. Does it do its own authentication? @@ -179,7 +179,7 @@ You have several options for connecting to nodes, pods and services from outside - Only works for HTTP/HTTPS. - Described [here](#discovering-builtin-services). - Access from a node or pod in the cluster. - - Run a pod, and then connect to a shell in it using [kubectl exec](user-guide/kubectl/kubectl_exec.md). + - Run a pod, and then connect to a shell in it using [kubectl exec](kubectl/kubectl_exec.md). Connect to other nodes, pods, and services from that shell. - Some clusters may allow you to ssh to a node in the cluster. From there you may be able to access cluster services. This is a non-standard method, and will work on some clusters but @@ -279,5 +279,5 @@ will typically ensure that the latter types are setup correctly. 
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/accessing-the-cluster.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/accessing-the-cluster.md?pixel)]() diff --git a/docs/user-guide/annotations.md b/docs/user-guide/annotations.md index 27a9685e53..7ecf7bbe70 100644 --- a/docs/user-guide/annotations.md +++ b/docs/user-guide/annotations.md @@ -40,5 +40,5 @@ Yes, this information could be stored in an external database or directory, but -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/annotations.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/annotations.md?pixel)]() diff --git a/docs/user-guide/application-troubleshooting.md b/docs/user-guide/application-troubleshooting.md index b062b779e9..d6a1d82a22 100644 --- a/docs/user-guide/application-troubleshooting.md +++ b/docs/user-guide/application-troubleshooting.md @@ -16,7 +16,7 @@ certainly want the docs that go with that version. This guide is to help users debug applications that are deployed into Kubernetes and not behaving correctly. This is *not* a guide for people who want to debug their cluster. 
For that you should check out -[this guide](cluster-troubleshooting.md) +[this guide](../cluster-troubleshooting.md) **Table of Contents** @@ -167,5 +167,5 @@ check: -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/application-troubleshooting.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/application-troubleshooting.md?pixel)]() diff --git a/docs/user-guide/compute-resources.md b/docs/user-guide/compute-resources.md index 5772e64589..9305ee67b0 100644 --- a/docs/user-guide/compute-resources.md +++ b/docs/user-guide/compute-resources.md @@ -121,7 +121,7 @@ To determine if a container cannot be scheduled or is being killed due to resour The resource usage of a pod is reported as part of the Pod status. -If [optional monitoring](../cluster/addons/cluster-monitoring/README.md) is configured for your cluster, +If [optional monitoring](../../cluster/addons/cluster-monitoring/README.md) is configured for your cluster, then pod resource usage can be retrieved from the monitoring system. ## Troubleshooting @@ -147,7 +147,7 @@ Here are some example command lines that extract just the necessary information: - `kubectl get nodes -o yaml | grep '\sname\|cpu\|memory'` - `kubectl get nodes -o json | jq '.items[] | {name: .metadata.name, cap: .status.capacity}'` -The [resource quota](admin/resource-quota.md) feature can be configured +The [resource quota](../admin/resource-quota.md) feature can be configured to limit the total amount of resources that can be consumed. If used in conjunction with namespaces, it can prevent one team from hogging all the resources. @@ -209,7 +209,7 @@ such as [EmptyDir volumes](volumes.md#emptydir). The current system only supports container limits for CPU and Memory. It is planned to add new resource types, including a node disk space -resource, and a framework for adding custom [resource types](design/resources.md#resource-types). 
+resource, and a framework for adding custom [resource types](../design/resources.md#resource-types). The current system does not facilitate overcommitment of resources because resources reserved with container limits are assured. It is planned to support multiple levels of [Quality of @@ -223,5 +223,5 @@ across providers and platforms. -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/compute-resources.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/compute-resources.md?pixel)]() diff --git a/docs/user-guide/configuring-containers.md b/docs/user-guide/configuring-containers.md index 44cdea1549..15bd88969f 100644 --- a/docs/user-guide/configuring-containers.md +++ b/docs/user-guide/configuring-containers.md @@ -35,7 +35,7 @@ In the declarative style, all configuration is stored in YAML or JSON configurat ## Launching a container using a configuration file -Kubernetes executes containers in [*Pods*](../../docs/pods.md). A pod containing a simple Hello World container can be specified in YAML as follows: +Kubernetes executes containers in [*Pods*](pods.md). A pod containing a simple Hello World container can be specified in YAML as follows: ```yaml apiVersion: v1 @@ -53,7 +53,7 @@ The value of `metadata.name`, `hello-world`, will be the name of the pod resourc `restartPolicy: Never` indicates that we just want to run the container once and then terminate the pod. -The [`command`](../../docs/containers.md#containers-and-commands) overrides the Docker container’s `Entrypoint`. Command arguments (corresponding to Docker’s `Cmd`) may be specified using `args`, as follows: +The [`command`](containers.md#containers-and-commands) overrides the Docker container’s `Entrypoint`. 
Command arguments (corresponding to Docker’s `Cmd`) may be specified using `args`, as follows: ```yaml command: ["/bin/echo"] diff --git a/docs/user-guide/connecting-applications.md b/docs/user-guide/connecting-applications.md index d42bf9a0fd..c055743a76 100644 --- a/docs/user-guide/connecting-applications.md +++ b/docs/user-guide/connecting-applications.md @@ -78,7 +78,7 @@ You can read more about [how we achieve this](../admin/networking.md#how-to-achi So we have pods running nginx in a flat, cluster wide, address space. In theory, you could talk to these pods directly, but what happens when a node dies? The pods die with it, and the replication controller will create new ones, with different ips. This is the problem a Service solves. -A Kubernetes Service is an abstraction which defines a logical set of Pods running somewhere in your cluster, that all provide the same functionality. When created, each Service is assigned a unique IP address (also called clusterIP). This address is tied to the lifespan of the Service, and will not change while the Service is alive. Pods can be configured to talk to the Service, and know that communication to the Service will be automatically load-balanced out to some pod that is a member of the Service ([why not use round robin dns?](../services.md#why-not-use-round-robin-dns)). +A Kubernetes Service is an abstraction which defines a logical set of Pods running somewhere in your cluster, that all provide the same functionality. When created, each Service is assigned a unique IP address (also called clusterIP). This address is tied to the lifespan of the Service, and will not change while the Service is alive. Pods can be configured to talk to the Service, and know that communication to the Service will be automatically load-balanced out to some pod that is a member of the Service ([why not use round robin dns?](services.md#why-not-use-round-robin-dns)). 
You can create a Service for your 2 nginx replicas with the following yaml: ```yaml @@ -106,7 +106,7 @@ $ kubectl get ep NAME ENDPOINTS nginxsvc 10.245.0.14:80,10.245.0.15:80 ``` -You should now be able to curl the nginx Service on `10.0.208.159:80` from any node in your cluster. Note that the Service ip is completely virtual, it never hits the wire, if you’re curious about how this works you can read more about the [service proxy](../services.md#virtual-ips-and-service-proxies). +You should now be able to curl the nginx Service on `10.0.208.159:80` from any node in your cluster. Note that the Service ip is completely virtual, it never hits the wire, if you’re curious about how this works you can read more about the [service proxy](services.md#virtual-ips-and-service-proxies). ## Accessing the Service from other pods in the cluster diff --git a/docs/user-guide/connecting-to-applications-port-forward.md b/docs/user-guide/connecting-to-applications-port-forward.md index 90d49cb443..c191d8c886 100644 --- a/docs/user-guide/connecting-to-applications-port-forward.md +++ b/docs/user-guide/connecting-to-applications-port-forward.md @@ -12,8 +12,8 @@ certainly want the docs that go with that version. -#Connecting to applications: kubectl port-forward -kubectl port-forward forwards connections to a local port to a port on a pod. Its man page is available [here](kubectl/kubectl_port-forward.md). Compared to [kubectl proxy](../../docs/accessing-the-cluster.md#using-kubectl-proxy), `kubectl port-forward` is more generic as it can forward TCP traffic while `kubectl proxy` can only forward HTTP traffic. This guide demonstrates how to use `kubectl port-forward` to connect to a Redis database, which may be useful for database debugging. +#Connecting to applications: kubectl port-forward +kubectl port-forward forwards connections to a local port to a port on a pod. Its man page is available [here](kubectl/kubectl_port-forward.md). 
Compared to [kubectl proxy](accessing-the-cluster.md#using-kubectl-proxy), `kubectl port-forward` is more generic as it can forward TCP traffic while `kubectl proxy` can only forward HTTP traffic. This guide demonstrates how to use `kubectl port-forward` to connect to a Redis database, which may be useful for database debugging. ## Creating a Redis master diff --git a/docs/user-guide/connecting-to-applications-proxy.md b/docs/user-guide/connecting-to-applications-proxy.md index 57478cf187..194d939aa0 100644 --- a/docs/user-guide/connecting-to-applications-proxy.md +++ b/docs/user-guide/connecting-to-applications-proxy.md @@ -12,8 +12,8 @@ certainly want the docs that go with that version. -#Connecting to applications: kubectl proxy and apiserver proxy -You have seen the [basics](../../docs/accessing-the-cluster.md) about `kubectl proxy` and `apiserver proxy`. This guide shows how to use them together to access a service([kube-ui](../../docs/ui.md)) running on the Kubernetes cluster from your workstation. +#Connecting to applications: kubectl proxy and apiserver proxy +You have seen the [basics](accessing-the-cluster.md) about `kubectl proxy` and `apiserver proxy`. This guide shows how to use them together to access a service([kube-ui](ui.md)) running on the Kubernetes cluster from your workstation. ##Getting the apiserver proxy URL of kube-ui @@ -22,7 +22,7 @@ kube-ui is deployed as a cluster add-on. To find its apiserver proxy URL, $ kubectl cluster-info | grep "KubeUI" KubeUI is running at https://173.255.119.104/api/v1/proxy/namespaces/kube-system/services/kube-ui ``` -if this command does not find the URL, try the steps [here](../../docs/ui.md#accessing-the-ui). +if this command does not find the URL, try the steps [here](ui.md#accessing-the-ui). 
##Connecting to the kube-ui service from your local workstation diff --git a/docs/user-guide/container-environment.md b/docs/user-guide/container-environment.md index 1641e7ea13..88a667e3b4 100644 --- a/docs/user-guide/container-environment.md +++ b/docs/user-guide/container-environment.md @@ -119,5 +119,5 @@ Hook handlers are the way that hooks are surfaced to containers.  Containers ca -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/container-environment.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/container-environment.md?pixel)]() diff --git a/docs/user-guide/containers.md b/docs/user-guide/containers.md index ec930a8476..26e3195640 100644 --- a/docs/user-guide/containers.md +++ b/docs/user-guide/containers.md @@ -104,5 +104,5 @@ The relationship between Docker's capabilities and [Linux capabilities](http://m -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/containers.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/containers.md?pixel)]() diff --git a/docs/user-guide/debugging-services.md b/docs/user-guide/debugging-services.md index d8a1fe3e0f..01377bf984 100644 --- a/docs/user-guide/debugging-services.md +++ b/docs/user-guide/debugging-services.md @@ -504,5 +504,5 @@ Contact us on -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/debugging-services.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/debugging-services.md?pixel)]() diff --git a/docs/user-guide/deploying-applications.md b/docs/user-guide/deploying-applications.md index 03ccd2ea98..7cff202b12 100644 --- a/docs/user-guide/deploying-applications.md +++ b/docs/user-guide/deploying-applications.md @@ -18,7 +18,7 @@ You previously read about how to quickly deploy a simple replicated application ## Launching a set of replicas using a configuration file 
-Kubernetes creates and manages sets of replicated containers (actually, replicated [Pods](../../docs/pods.md)) using [*Replication Controllers*](../../docs/replication-controller.md). +Kubernetes creates and manages sets of replicated containers (actually, replicated [Pods](pods.md)) using [*Replication Controllers*](replication-controller.md). A replication controller simply ensures that a specified number of pod "replicas" are running at any one time. If there are too many, it will kill some. If there are too few, it will start more. It’s analogous to Google Compute Engine’s [Instance Group Manager](https://cloud.google.com/compute/docs/instance-groups/manager/) or AWS’s [Auto-scaling Group](http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroup.html) (with no scaling policies). @@ -82,7 +82,7 @@ If you try to delete the pods before deleting the replication controller, it wil ## Labels -Kubernetes uses user-defined key-value attributes called [*labels*](../../docs/labels.md) to categorize and identify sets of resources, such as pods and replication controllers. The example above specified a single label in the pod template, with key `app` and value `nginx`. All pods created carry that label, which can be viewed using `-L`: +Kubernetes uses user-defined key-value attributes called [*labels*](labels.md) to categorize and identify sets of resources, such as pods and replication controllers. The example above specified a single label in the pod template, with key `app` and value `nginx`. All pods created carry that label, which can be viewed using `-L`: ```bash $ kubectl get pods -L app NAME READY STATUS RESTARTS AGE APP @@ -97,7 +97,7 @@ CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS APP my-nginx nginx nginx app=nginx 2 nginx ``` -More importantly, the pod template’s labels are used to create a [`selector`](../../docs/labels.md#label-selectors) that will match pods carrying those labels. 
You can see this field by requesting it using the [Go template output format of `kubectl get`](kubectl/kubectl_get.md): +More importantly, the pod template’s labels are used to create a [`selector`](labels.md#label-selectors) that will match pods carrying those labels. You can see this field by requesting it using the [Go template output format of `kubectl get`](kubectl/kubectl_get.md): ```bash $ kubectl get rc my-nginx -o template --template="{{.spec.selector}}" map[app:nginx] diff --git a/docs/user-guide/downward-api.md b/docs/user-guide/downward-api.md index 155b69ea2b..5f4dab51c9 100644 --- a/docs/user-guide/downward-api.md +++ b/docs/user-guide/downward-api.md @@ -84,10 +84,10 @@ spec: ``` Some more thorough examples: - * [environment variables](../examples/environment-guide/) - * [downward API](../examples/downward-api/) + * [environment variables](environment-guide/) + * [downward API](downward-api/) -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/downward-api.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/downward-api.md?pixel)]() diff --git a/docs/user-guide/downward-api/README.md b/docs/user-guide/downward-api/README.md index e886bf41b4..a7c6bf1a14 100644 --- a/docs/user-guide/downward-api/README.md +++ b/docs/user-guide/downward-api/README.md @@ -21,7 +21,7 @@ namespace using the [downward API](https://github.com/GoogleCloudPlatform/kubern This example assumes you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting -started](../../docs/getting-started-guides/) for installation instructions for your platform. +started](../../../docs/getting-started-guides/) for installation instructions for your platform. 
## Step One: Create the pod @@ -48,5 +48,5 @@ $ kubectl logs dapi-test-pod | grep POD_ -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/downward-api/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/downward-api/README.md?pixel)]() diff --git a/docs/user-guide/environment-guide/README.md b/docs/user-guide/environment-guide/README.md index c0453086da..37bbda31b6 100644 --- a/docs/user-guide/environment-guide/README.md +++ b/docs/user-guide/environment-guide/README.md @@ -21,7 +21,7 @@ environment information about itself, and a backend pod that it has accessed through the service. The goal is to illuminate the environment metadata available to running containers inside the Kubernetes cluster. The documentation for the kubernetes environment -is [here](../../docs/container-environment.md). +is [here](../../../docs/user-guide/container-environment.md). ![Diagram](diagram.png) @@ -30,7 +30,7 @@ Prerequisites This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool somewhere in your path. Please see the [getting -started](../../docs/getting-started-guides/) for installation instructions +started](../../../docs/getting-started-guides/) for installation instructions for your platform. Optional: Build your own containers @@ -81,8 +81,8 @@ Backend Namespace: default ``` First the frontend pod's information is printed. The pod name and -[namespace](../../docs/design/namespaces.md) are retreived from the -[Downward API](../../docs/downward-api.md). Next, `USER_VAR` is the name of +[namespace](../../../docs/design/namespaces.md) are retrieved from the +[Downward API](../../../docs/user-guide/downward-api.md). Next, `USER_VAR` is the name of an environment variable set in the [pod definition](show-rc.yaml). Then, the dynamic kubernetes environment variables are scanned and printed.
These are used to find the backend @@ -104,5 +104,5 @@ Cleanup -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/environment-guide/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/environment-guide/README.md?pixel)]() diff --git a/docs/user-guide/environment-guide/containers/README.md b/docs/user-guide/environment-guide/containers/README.md index 195c9e43b5..e58e718e29 100644 --- a/docs/user-guide/environment-guide/containers/README.md +++ b/docs/user-guide/environment-guide/containers/README.md @@ -35,5 +35,5 @@ specified `image:` with the one that you built. -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/environment-guide/containers/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/environment-guide/containers/README.md?pixel)]() diff --git a/docs/user-guide/getting-into-containers.md b/docs/user-guide/getting-into-containers.md index ea20d13900..dc4005183a 100644 --- a/docs/user-guide/getting-into-containers.md +++ b/docs/user-guide/getting-into-containers.md @@ -16,7 +16,7 @@ certainly want the docs that go with that version. Developers can use `kubectl exec` to run commands in a container. This guide demonstrates two use cases. ##Using kubectl exec to check the environment variables of a container -Kubernetes exposes [services](../../docs/services.md#environment-variables) through environment variables. It is convenient to check these environment variables using `kubectl exec`. +Kubernetes exposes [services](services.md#environment-variables) through environment variables. It is convenient to check these environment variables using `kubectl exec`. 
We first create a pod and a service, diff --git a/docs/user-guide/identifiers.md b/docs/user-guide/identifiers.md index a462f27f36..84691ecce3 100644 --- a/docs/user-guide/identifiers.md +++ b/docs/user-guide/identifiers.md @@ -18,12 +18,12 @@ All objects in the Kubernetes REST API are unambiguously identified by a Name an For non-unique user-provided attributes, Kubernetes provides [labels](labels.md) and [annotations](annotations.md). ## Names -Names are generally client-provided. Only one object of a given kind can have a given name at a time (i.e., they are spatially unique). But if you delete an object, you can make a new object with the same name. Names are the used to refer to an object in a resource URL, such as `/api/v1/pods/some-name`. By convention, the names of Kubernetes resources should be up to maximum length of 253 characters and consist of lower case alphanumeric characters, `-`, and `.`, but certain resources have more specific restrictions. See the [identifiers design doc](design/identifiers.md) for the precise syntax rules for names. +Names are generally client-provided. Only one object of a given kind can have a given name at a time (i.e., they are spatially unique). But if you delete an object, you can make a new object with the same name. Names are used to refer to an object in a resource URL, such as `/api/v1/pods/some-name`. By convention, the names of Kubernetes resources should be up to maximum length of 253 characters and consist of lower case alphanumeric characters, `-`, and `.`, but certain resources have more specific restrictions. See the [identifiers design doc](../design/identifiers.md) for the precise syntax rules for names. ## UIDs UID are generated by Kubernetes. Every object created over the whole lifetime of a Kubernetes cluster has a distinct UID (i.e., they are spatially and temporally unique).
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/identifiers.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/identifiers.md?pixel)]() diff --git a/docs/user-guide/images.md b/docs/user-guide/images.md index cf964a1f2a..22dbdbec04 100644 --- a/docs/user-guide/images.md +++ b/docs/user-guide/images.md @@ -215,7 +215,7 @@ spec: ``` This needs to be done for each pod that is using a private registry. However, setting of this field can be automated by setting the imagePullSecrets -in a [serviceAccount](service-accounts.md) resource. +in a [serviceAccount](../service-accounts.md) resource. Currently, all pods will potentially have read access to any images which were pulled using imagePullSecrets. That is, imagePullSecrets does *NOT* protect your @@ -251,5 +251,5 @@ common use cases and suggested solutions. -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/images.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/images.md?pixel)]() diff --git a/docs/user-guide/kubeconfig-file.md b/docs/user-guide/kubeconfig-file.md index 1388d39abb..0a199f7b7a 100644 --- a/docs/user-guide/kubeconfig-file.md +++ b/docs/user-guide/kubeconfig-file.md @@ -97,7 +97,7 @@ The rules for loading and merging the kubeconfig files are straightforward, but ## Manipulation of kubeconfig via `kubectl config ` In order to more easily manipulate kubeconfig files, there are a series of subcommands to `kubectl config` to help. -See [user-guide/kubectl/kubectl_config.md](user-guide/kubectl/kubectl_config.md) for help. +See [kubectl/kubectl_config.md](kubectl/kubectl_config.md) for help. 
### Example ``` @@ -164,5 +164,5 @@ $kubectl config use-context federal-context -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/kubeconfig-file.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubeconfig-file.md?pixel)]() diff --git a/docs/user-guide/labels.md b/docs/user-guide/labels.md index 7459055322..dc9a915343 100644 --- a/docs/user-guide/labels.md +++ b/docs/user-guide/labels.md @@ -121,5 +121,5 @@ Concerning API: we may extend such filtering to DELETE operations in the future. -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/labels.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/labels.md?pixel)]() diff --git a/docs/user-guide/limitrange/README.md b/docs/user-guide/limitrange/README.md index 75110d6561..5dae5c1e3e 100644 --- a/docs/user-guide/limitrange/README.md +++ b/docs/user-guide/limitrange/README.md @@ -43,7 +43,7 @@ For a detailed description of the Kubernetes resource model, see [Resources](htt Step 0: Prerequisites ----------------------------------------- -This example requires a running Kubernetes cluster. See the [Getting Started guides](../../docs/getting-started-guides/) for how to get started. +This example requires a running Kubernetes cluster. See the [Getting Started guides](../../../docs/getting-started-guides/) for how to get started. Change to the `/examples/limitrange` directory if you're not already there. @@ -178,5 +178,5 @@ amount of resource a pod consumes on a node. 
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/limitrange/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/limitrange/README.md?pixel)]() diff --git a/docs/user-guide/liveness/README.md b/docs/user-guide/liveness/README.md index da379fdd86..70b71601bb 100644 --- a/docs/user-guide/liveness/README.md +++ b/docs/user-guide/liveness/README.md @@ -84,5 +84,5 @@ Sat, 27 Jun 2015 13:44:44 +0200 Sat, 27 Jun 2015 13:44:44 +0200 1 {kube -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/liveness/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/liveness/README.md?pixel)]() diff --git a/docs/user-guide/logging-demo/README.md b/docs/user-guide/logging-demo/README.md index ce0cbd6491..4c38de26f9 100644 --- a/docs/user-guide/logging-demo/README.md +++ b/docs/user-guide/logging-demo/README.md @@ -13,7 +13,7 @@ certainly want the docs that go with that version. # Elasticsearch/Kibana Logging Demonstration -This directory contains two [pod](../../docs/pods.md) specifications which can be used as synthetic +This directory contains two [pod](../../../docs/user-guide/pods.md) specifications which can be used as synthetic logging sources. The pod specification in [synthetic_0_25lps.yaml](synthetic_0_25lps.yaml) describes a pod that just emits a log message once every 4 seconds. The pod specification in [synthetic_10lps.yaml](synthetic_10lps.yaml) @@ -21,12 +21,12 @@ describes a pod that just emits 10 log lines per second. To observe the ingested log lines when using Google Cloud Logging please see the getting started instructions -at [Cluster Level Logging to Google Cloud Logging](../../docs/getting-started-guides/logging.md). +at [Cluster Level Logging to Google Cloud Logging](../../../docs/getting-started-guides/logging.md). 
To observe the ingested log lines when using Elasticsearch and Kibana please see the getting started instructions -at [Cluster Level Logging with Elasticsearch and Kibana](../../docs/getting-started-guides/logging-elasticsearch.md). +at [Cluster Level Logging with Elasticsearch and Kibana](../../../docs/getting-started-guides/logging-elasticsearch.md). -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/logging-demo/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/logging-demo/README.md?pixel)]() diff --git a/docs/user-guide/logging.md b/docs/user-guide/logging.md index a6111ff066..850d4a9c12 100644 --- a/docs/user-guide/logging.md +++ b/docs/user-guide/logging.md @@ -15,12 +15,12 @@ certainly want the docs that go with that version. # Logging ## Logging by Kubernetes Components -Kubernetes components, such as kubelet and apiserver, use the [glog](https://godoc.org/github.com/golang/glog) logging library. Developer conventions for logging severity are described in [devel/logging.md](devel/logging.md). +Kubernetes components, such as kubelet and apiserver, use the [glog](https://godoc.org/github.com/golang/glog) logging library. Developer conventions for logging severity are described in [docs/devel/logging.md](../devel/logging.md). ## Examining the logs of running containers The logs of a running container may be fetched using the command `kubectl logs`. 
For example, given this pod specification which has a container which writes out some text to standard -output every second [counter-pod.yaml](../examples/blog-logging/counter-pod.yaml): +output every second [counter-pod.yaml](../../examples/blog-logging/counter-pod.yaml): ``` apiVersion: v1 kind: Pod @@ -70,19 +70,19 @@ $ kubectl logs kube-dns-v3-7r1l9 etcd ``` ## Cluster level logging to Google Cloud Logging -The getting started guide [Cluster Level Logging to Google Cloud Logging](getting-started-guides/logging.md) +The getting started guide [Cluster Level Logging to Google Cloud Logging](../getting-started-guides/logging.md) explains how container logs are ingested into [Google Cloud Logging](https://cloud.google.com/logging/docs/) and shows how to query the ingested logs. ## Cluster level logging with Elasticsearch and Kibana -The getting started guide [Cluster Level Logging with Elasticsearch and Kibana](getting-started-guides/logging-elasticsearch.md) +The getting started guide [Cluster Level Logging with Elasticsearch and Kibana](../getting-started-guides/logging-elasticsearch.md) describes how to ingest cluster level logs into Elasticsearch and view them using Kibana. ## Ingesting Application Log Files Cluster level logging only collects the standard output and standard error output of the applications -running in containers. The guide [Collecting log files within containers with Fluentd](../contrib/logging/fluentd-sidecar-gcp/README.md) explains how the log files of applications can also be ingested into Google Cloud logging. +running in containers. The guide [Collecting log files within containers with Fluentd](../../contrib/logging/fluentd-sidecar-gcp/README.md) explains how the log files of applications can also be ingested into Google Cloud logging. 
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/logging.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/logging.md?pixel)]() diff --git a/docs/user-guide/managing-deployments.md b/docs/user-guide/managing-deployments.md index 79e6d03a93..cbfb8b2089 100644 --- a/docs/user-guide/managing-deployments.md +++ b/docs/user-guide/managing-deployments.md @@ -351,11 +351,11 @@ Update succeeded. Deleting my-nginx my-nginx-v4 ``` -You can also run the [update demo](../../examples/update-demo/) to see a visual representation of the rolling update process. +You can also run the [update demo](update-demo/) to see a visual representation of the rolling update process. ## In-place updates of resources -Sometimes it’s necessary to make narrow, non-disruptive updates to resources you’ve created. For instance, you might want to add an [annotation](../../docs/annotations.md) with a description of your object. That’s easiest to do with `kubectl patch`: +Sometimes it’s necessary to make narrow, non-disruptive updates to resources you’ve created. For instance, you might want to add an [annotation](annotations.md) with a description of your object. That’s easiest to do with `kubectl patch`: ```bash $ kubectl patch rc my-nginx-v4 -p '{"metadata": {"annotations": {"description": "my frontend running nginx"}}}' my-nginx-v4 diff --git a/docs/user-guide/monitoring.md b/docs/user-guide/monitoring.md index 9884b7f9d9..ebd33fd411 100644 --- a/docs/user-guide/monitoring.md +++ b/docs/user-guide/monitoring.md @@ -18,7 +18,7 @@ Understanding how an application behaves when deployed is crucial to scaling the ### Overview -Heapster is a cluster-wide aggregator of monitoring and event data. It currently supports Kubernetes natively and works on all Kubernetes setups. Heapster runs as a pod in the cluster, similar to how any Kubernetes application would run. 
The Heapster pod discovers all nodes in the cluster and queries usage information from the nodes’ [Kubelet](../DESIGN.md#kubelet)s, the on-machine Kubernetes agent. The Kubelet itself fetches the data from [cAdvisor](https://github.com/google/cadvisor). Heapster groups the information by pod along with the relevant labels. This data is then pushed to a configurable backend for storage and visualization. Currently supported backends include [InfluxDB](http://influxdb.com/) (with [Grafana](http://grafana.org/) for visualization) and [Google Cloud Monitoring](https://cloud.google.com/monitoring/). The overall architecture of the service can be seen below: +Heapster is a cluster-wide aggregator of monitoring and event data. It currently supports Kubernetes natively and works on all Kubernetes setups. Heapster runs as a pod in the cluster, similar to how any Kubernetes application would run. The Heapster pod discovers all nodes in the cluster and queries usage information from the nodes’ [Kubelet](../../DESIGN.md#kubelet)s, the on-machine Kubernetes agent. The Kubelet itself fetches the data from [cAdvisor](https://github.com/google/cadvisor). Heapster groups the information by pod along with the relevant labels. This data is then pushed to a configurable backend for storage and visualization. Currently supported backends include [InfluxDB](http://influxdb.com/) (with [Grafana](http://grafana.org/) for visualization) and [Google Cloud Monitoring](https://cloud.google.com/monitoring/). The overall architecture of the service can be seen below: ![overall monitoring architecture](monitoring-architecture.png) @@ -30,7 +30,7 @@ cAdvisor is an open source container resource usage and performance analysis age On most Kubernetes clusters, cAdvisor exposes a simple UI for on-machine containers on port 4194. 
Here is a snapshot of part of cAdvisor’s UI that shows the overall machine usage: -![cAdvisor](cadvisor.png) +![cAdvisor](../cadvisor.png) ### Kubelet @@ -61,7 +61,7 @@ Here is a video showing how to setup and run a Google Cloud Monitoring backed He Here is a snapshot of the a Google Cloud Monitoring dashboard showing cluster-wide resource usage. -![Google Cloud Monitoring dashboard](gcm.png) +![Google Cloud Monitoring dashboard](../gcm.png) ## Try it out! Now that you’ve learned a bit about Heapster, feel free to try it out on your own clusters! The [Heapster repository](https://github.com/GoogleCloudPlatform/heapster) is available on GitHub. It contains detailed instructions to setup Heapster and its storage backends. Heapster runs by default on most Kubernetes clusters, so you may already have it! Feedback is always welcome. Please let us know if you run into any issues. Heapster and Kubernetes developers hang out in the [#google-containers](http://webchat.freenode.net/?channels=google-containers) IRC channel on freenode.net. You can also reach us on the [google-containers Google Groups mailing list](https://groups.google.com/forum/#!forum/google-containers). @@ -72,5 +72,5 @@ Now that you’ve learned a bit about Heapster, feel free to try it out on your -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/monitoring.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/monitoring.md?pixel)]() diff --git a/docs/user-guide/namespaces.md b/docs/user-guide/namespaces.md index 74573828ff..5e94ceccf7 100644 --- a/docs/user-guide/namespaces.md +++ b/docs/user-guide/namespaces.md @@ -18,9 +18,9 @@ Namespaces help different projects, teams, or customers to share a kubernetes cl Use of multiple namespaces is optional. For small teams, they may not be needed. -Namespaces are still under development. For now, the best documentation is the [Namespaces Design Document](design/namespaces.md). 
+Namespaces are still under development. For now, the best documentation is the [Namespaces Design Document](../design/namespaces.md). -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/namespaces.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/namespaces.md?pixel)]() diff --git a/docs/user-guide/node-selection/README.md b/docs/user-guide/node-selection/README.md index 48ea49e5cc..546e47f1df 100644 --- a/docs/user-guide/node-selection/README.md +++ b/docs/user-guide/node-selection/README.md @@ -28,7 +28,7 @@ Then, to add a label to the node you've chosen, run `kubectl label nodes -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/node-selection/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/node-selection/README.md?pixel)]() diff --git a/docs/user-guide/overview.md b/docs/user-guide/overview.md index 5479541843..cbd5613274 100644 --- a/docs/user-guide/overview.md +++ b/docs/user-guide/overview.md @@ -24,17 +24,17 @@ Users can create and manage pods themselves, but Kubernetes drastically simplifi Frequently it is useful to refer to a set of pods, for example to limit the set of pods on which a mutating operation should be performed, or that should be queried for status. As a general mechanism, users can attach to most Kubernetes API objects arbitrary key-value pairs called [labels](labels.md), and then use a set of label selectors (key-value queries over labels) to constrain the target of API operations. Each resource also has a map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about this object, called [annotations](annotations.md). -Kubernetes supports a unique [networking model](admin/networking.md). 
Kubernetes encourages a flat address space and does not dynamically allocate ports, instead allowing users to select whichever ports are convenient for them. To achieve this, it allocates an IP address for each pod. +Kubernetes supports a unique [networking model](../admin/networking.md). Kubernetes encourages a flat address space and does not dynamically allocate ports, instead allowing users to select whichever ports are convenient for them. To achieve this, it allocates an IP address for each pod. -Modern Internet applications are commonly built by layering micro-services, for example a set of web front-ends talking to a distributed in-memory key-value store talking to a replicated storage service. To facilitate this architecture, Kubernetes offers the [service](services.md) abstraction, which provides a stable IP address and [DNS name](admin/dns.md) that corresponds to a dynamic set of pods such as the set of pods constituting a micro-service. The set is defined using a label selector and thus can refer to any set of pods. When a container running in a Kubernetes pod connects to this address, the connection is forwarded by a local agent (called the kube proxy) running on the source machine, to one of the corresponding back-end containers. The exact back-end is chosen using a round-robin policy to balance load. The kube proxy takes care of tracking the dynamic set of back-ends as pods are replaced by new pods on new hosts, so that the service IP address (and DNS name) never changes. +Modern Internet applications are commonly built by layering micro-services, for example a set of web front-ends talking to a distributed in-memory key-value store talking to a replicated storage service. To facilitate this architecture, Kubernetes offers the [service](services.md) abstraction, which provides a stable IP address and [DNS name](../admin/dns.md) that corresponds to a dynamic set of pods such as the set of pods constituting a micro-service. 
The set is defined using a label selector and thus can refer to any set of pods. When a container running in a Kubernetes pod connects to this address, the connection is forwarded by a local agent (called the kube proxy) running on the source machine, to one of the corresponding back-end containers. The exact back-end is chosen using a round-robin policy to balance load. The kube proxy takes care of tracking the dynamic set of back-ends as pods are replaced by new pods on new hosts, so that the service IP address (and DNS name) never changes. Every resource in Kubernetes, such as a pod, is identified by a URI and has a UID. Important components of the URI are the kind of object (e.g. pod), the object’s name, and the object’s [namespace](namespaces.md). For a certain object kind, every name is unique within its namespace. In contexts where an object name is provided without a namespace, it is assumed to be in the default namespace. UID is unique across time and space. Other details: -* [API](api.md) -* [Client libraries](client-libraries.md) -* [Command-line interface](user-guide/kubectl/kubectl.md) +* [API](../api.md) +* [Client libraries](../client-libraries.md) +* [Command-line interface](kubectl/kubectl.md) * [UI](ui.md) * [Images and registries](images.md) * [Container environment](container-environment.md) @@ -43,5 +43,5 @@ Other details: -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/overview.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/overview.md?pixel)]() diff --git a/docs/user-guide/persistent-volumes.md b/docs/user-guide/persistent-volumes.md index 7bfc560785..33a92ace6d 100644 --- a/docs/user-guide/persistent-volumes.md +++ b/docs/user-guide/persistent-volumes.md @@ -47,7 +47,7 @@ A `PersistentVolume` (PV) is a piece of networked storage in the cluster that ha A `PersistentVolumeClaim` (PVC) is a request for storage by a user. It is similar to a pod. 
Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific size and access modes (e.g, can be mounted once read/write or many times read-only). -Please see the [detailed walkthrough with working examples](../examples/persistent-volumes/). +Please see the [detailed walkthrough with working examples](persistent-volumes/). ## Lifecycle of a volume and claim @@ -116,7 +116,7 @@ Each PV contains a spec and status, which is the specification and status of the ### Capacity -Generally, a PV will have a specific storage capacity. This is set using the PV's `capacity` attribute. See the Kubernetes [Resource Model](design/resources.md) to understand the units expected by `capacity`. +Generally, a PV will have a specific storage capacity. This is set using the PV's `capacity` attribute. See the Kubernetes [Resource Model](../design/resources.md) to understand the units expected by `capacity`. Currently, storage size is the only resource that can be set or requested. Future attributes may include IOPS, throughput, etc. @@ -184,7 +184,7 @@ Claims use the same conventions as volumes when requesting storage with specific ### Resources -Claims, like pods, can request specific quantities of a resource. In this case, the request is for storage. The same [resource model](design/resources.md) applies to both volumes and claims. +Claims, like pods, can request specific quantities of a resource. In this case, the request is for storage. The same [resource model](../design/resources.md) applies to both volumes and claims. 
## Claims As Volumes @@ -212,5 +212,5 @@ spec: -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/persistent-volumes.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/persistent-volumes.md?pixel)]() diff --git a/docs/user-guide/persistent-volumes/README.md b/docs/user-guide/persistent-volumes/README.md index a554fcc1bd..b900ee0ee7 100644 --- a/docs/user-guide/persistent-volumes/README.md +++ b/docs/user-guide/persistent-volumes/README.md @@ -114,5 +114,5 @@ Enjoy! -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/persistent-volumes/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/persistent-volumes/README.md?pixel)]() diff --git a/docs/user-guide/pod-states.md b/docs/user-guide/pod-states.md index a16e72e633..d715faef79 100644 --- a/docs/user-guide/pod-states.md +++ b/docs/user-guide/pod-states.md @@ -20,7 +20,7 @@ This document covers the lifecycle of a pod. It is not an exhaustive document, ## Pod Phase -As consistent with the overall [API convention](api-conventions.md#typical-status-properties), phase is a simple, high-level summary of the phase of the lifecycle of a pod. It is not intended to be a comprehensive rollup of observations of container-level or even pod-level conditions or other state, nor is it intended to be a comprehensive state machine. +As consistent with the overall [API convention](../api-conventions.md#typical-status-properties), phase is a simple, high-level summary of the phase of the lifecycle of a pod. It is not intended to be a comprehensive rollup of observations of container-level or even pod-level conditions or other state, nor is it intended to be a comprehensive state machine. The number and meanings of `PodPhase` values are tightly guarded. Other than what is documented here, nothing should be assumed about pods with a given `PodPhase`. 
@@ -120,5 +120,5 @@ If a node dies or is disconnected from the rest of the cluster, some entity with -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/pod-states.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/pod-states.md?pixel)]() diff --git a/docs/user-guide/pods.md b/docs/user-guide/pods.md index 453415a85e..d360ce0aaa 100644 --- a/docs/user-guide/pods.md +++ b/docs/user-guide/pods.md @@ -39,7 +39,7 @@ Like individual application containers, pods are considered to be relatively eph Pods facilitate data sharing and communication among their constituents. -The applications in the pod all use the same network namespace/IP and port space, and can find and communicate with each other using localhost. Each pod has an IP address in a flat shared networking namespace that has full communication with other physical computers and containers across the network. The hostname is set to the pod's Name for the application containers within the pod. [More details on networking](admin/networking.md). +The applications in the pod all use the same network namespace/IP and port space, and can find and communicate with each other using localhost. Each pod has an IP address in a flat shared networking namespace that has full communication with other physical computers and containers across the network. The hostname is set to the pod's Name for the application containers within the pod. [More details on networking](../admin/networking.md). In addition to defining the application containers that run in the pod, the pod specifies a set of shared storage volumes. Volumes enable data to survive container restarts and to be shared among the applications within the pod. 
@@ -93,5 +93,5 @@ The current best practice for pets is to create a replication controller with `r -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/pods.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/pods.md?pixel)]() diff --git a/docs/user-guide/prereqs.md b/docs/user-guide/prereqs.md index 2e4d202310..e5ae27193d 100644 --- a/docs/user-guide/prereqs.md +++ b/docs/user-guide/prereqs.md @@ -16,7 +16,7 @@ certainly want the docs that go with that version. To deploy and manage applications on Kubernetes, you’ll use the Kubernetes command-line tool, [kubectl](kubectl/kubectl.md). It can be found in the release tar bundle, or can be built from source from github. Ensure that it is executable and in your path. -In order for kubectl to find and access the Kubernetes cluster, it needs a [kubeconfig file](../../docs/kubeconfig-file.md), which is created automatically when creating a cluster using kube-up.sh (see the [getting started guides](../../docs/getting-started-guides/) for more about creating clusters). If you need access to a cluster you didn’t create, see the [Sharing Cluster Access document](../../docs/sharing-clusters.md). +In order for kubectl to find and access the Kubernetes cluster, it needs a [kubeconfig file](kubeconfig-file.md), which is created automatically when creating a cluster using kube-up.sh (see the [getting started guides](../../docs/getting-started-guides/) for more about creating clusters). If you need access to a cluster you didn’t create, see the [Sharing Cluster Access document](sharing-clusters.md). 
diff --git a/docs/user-guide/production-pods.md b/docs/user-guide/production-pods.md index 7ee1cd4141..5be97f6cdb 100644 --- a/docs/user-guide/production-pods.md +++ b/docs/user-guide/production-pods.md @@ -18,7 +18,7 @@ You’ve seen [how to configure and deploy pods and containers](configuring-cont ## Persistent storage -The container file system only lives as long as the container does, so when a container crashes and restarts, changes to the filesystem will be lost and the container will restart from a clean slate. To access more-persistent storage, outside the container file system, you need a [*volume*](../../docs/volumes.md). This is especially important to stateful applications, such as key-value stores and databases. +The container file system only lives as long as the container does, so when a container crashes and restarts, changes to the filesystem will be lost and the container will restart from a clean slate. To access more-persistent storage, outside the container file system, you need a [*volume*](volumes.md). This is especially important to stateful applications, such as key-value stores and databases. For example, [Redis](http://redis.io/) is a key-value cache and store, which we use in the [guestbook](../../examples/guestbook/) and other examples. We can add a volume to it to store persistent data as follows: ```yaml @@ -47,15 +47,15 @@ spec: - mountPath: /redis-master-data name: data # must match the name of the volume, above ``` -`emptyDir` volumes live for the lifespan of the [pod](../../docs/pods.md), which is longer than the lifespan of any one container, so if the container fails and is restarted, our storage will live on. +`emptyDir` volumes live for the lifespan of the [pod](pods.md), which is longer than the lifespan of any one container, so if the container fails and is restarted, our storage will live on. 
-In addition to the local disk storage provided by `emptyDir`, Kubernetes supports many different network-attached storage solutions, including PD on GCE and EBS on EC2, which are preferred for critical data, and will handle details such as mounting and unmounting the devices on the nodes. See [the volumes doc](../../docs/volumes.md) for more details. +In addition to the local disk storage provided by `emptyDir`, Kubernetes supports many different network-attached storage solutions, including PD on GCE and EBS on EC2, which are preferred for critical data, and will handle details such as mounting and unmounting the devices on the nodes. See [the volumes doc](volumes.md) for more details. ## Distributing credentials Many applications need credentials, such as passwords, OAuth tokens, and TLS keys, to authenticate with other applications, databases, and services. Storing these credentials in container images or environment variables is less than ideal, since the credentials can then be copied by anyone with access to the image, pod/container specification, host file system, or host Docker daemon. -Kubernetes provides a mechanism, called [*secrets*](../../docs/secrets.md), that facilitates delivery of sensitive credentials to applications. A `Secret` is a simple resource containing a map of data. For instance, a simple secret with a username and password might look as follows: +Kubernetes provides a mechanism, called [*secrets*](secrets.md), that facilitates delivery of sensitive credentials to applications. A `Secret` is a simple resource containing a map of data. For instance, a simple secret with a username and password might look as follows: ```yaml apiVersion: v1 kind: Secret @@ -109,14 +109,14 @@ spec: name: supersecret ``` -For more details, see the [secrets document](../../docs/secrets.md), [example](../../examples/secrets/) and [design doc](../../docs/design/secrets.md). 
+For more details, see the [secrets document](secrets.md), [example](secrets/) and [design doc](../../docs/design/secrets.md). ## Authenticating with a private image registry -Secrets can also be used to pass [image registry credentials](../../docs/images.md#using-a-private-registry). +Secrets can also be used to pass [image registry credentials](images.md#using-a-private-registry). First, create a `.dockercfg` file, such as running `docker login `. -Then put the resulting `.dockercfg` file into a [secret resource](../../docs/secrets.md). For example: +Then put the resulting `.dockercfg` file into a [secret resource](secrets.md). For example: ``` $ docker login Username: janedoe @@ -162,7 +162,7 @@ spec: ## Helper containers -[Pods](../../docs/pods.md) support running multiple containers co-located together. They can be used to host vertically integrated application stacks, but their primary motivation is to support auxiliary helper programs that assist the primary application. Typical examples are data pullers, data pushers, and proxies. +[Pods](pods.md) support running multiple containers co-located together. They can be used to host vertically integrated application stacks, but their primary motivation is to support auxiliary helper programs that assist the primary application. Typical examples are data pullers, data pushers, and proxies. Such containers typically need to communicate with one another, often through the file system. This can be achieved by mounting the same volume into both containers. An example of this pattern would be a web server with a [program that polls a git repository](../../contrib/git-sync/) for new updates: ```yaml @@ -202,9 +202,9 @@ More examples can be found in our [blog article](http://blog.kubernetes.io/2015/ ## Resource management -Kubernetes’s scheduler will place applications only where they have adequate CPU and memory, but it can only do so if it knows how much [resources they require](../../docs/compute-resources.md). 
The consequence of specifying too little CPU is that the containers could be starved of CPU if too many other containers were scheduled onto the same node. Similarly, containers could die unpredictably due to running out of memory if no memory were requested, which can be especially likely for large-memory applications. +Kubernetes’s scheduler will place applications only where they have adequate CPU and memory, but it can only do so if it knows how much [resources they require](compute-resources.md). The consequence of specifying too little CPU is that the containers could be starved of CPU if too many other containers were scheduled onto the same node. Similarly, containers could die unpredictably due to running out of memory if no memory were requested, which can be especially likely for large-memory applications. -If no resource requirements are specified, a nominal amount of resources is assumed. (This default is applied via a [LimitRange](../../examples/limitrange/) for the default [Namespace](../../docs/namespaces.md). It can be viewed with `kubectl describe limitrange limits`.) You may explicitly specify the amount of resources required as follows: +If no resource requirements are specified, a nominal amount of resources is assumed. (This default is applied via a [LimitRange](limitrange/) for the default [Namespace](namespaces.md). It can be viewed with `kubectl describe limitrange limits`.) You may explicitly specify the amount of resources required as follows: ```yaml apiVersion: v1 kind: ReplicationController @@ -231,11 +231,11 @@ spec: ``` The container will die due to OOM (out of memory) if it exceeds its specified limit, so specifying a value a little higher than expected generally improves reliability. -If you’re not sure how much resources to request, you can first launch the application without specifying resources, and use [resource usage monitoring](../../docs/monitoring.md) to determine appropriate values. 
+If you’re not sure how much resources to request, you can first launch the application without specifying resources, and use [resource usage monitoring](monitoring.md) to determine appropriate values. ## Liveness and readiness probes (aka health checks) -Many applications running for long periods of time eventually transition to broken states, and cannot recover except by restarting them. Kubernetes provides [*liveness probes*](../../docs/pod-states.md#container-probes) to detect and remedy such situations. +Many applications running for long periods of time eventually transition to broken states, and cannot recover except by restarting them. Kubernetes provides [*liveness probes*](pod-states.md#container-probes) to detect and remedy such situations. A common way to probe an application is using HTTP, which can be specified as follows: ```yaml @@ -266,13 +266,13 @@ spec: Other times, applications are only temporarily unable to serve, and will recover on their own. Typically in such cases you’d prefer not to kill the application, but don’t want to send it requests, either, since the application won’t respond correctly or at all. A common such scenario is loading large data or configuration files during application startup. Kubernetes provides *readiness probes* to detect and mitigate such situations. Readiness probes are configured similarly to liveness probes, just using the `readinessProbe` field. A pod with containers reporting that they are not ready will not receive traffic through Kubernetes [services](connecting-applications.md). -For more details (e.g., how to specify command-based probes), see the [example in the walkthrough](../../examples/walkthrough/k8s201.md#health-checking), the [standalone example](../../examples/liveness/), and the [documentation](../../docs/pod-states.md#container-probes). 
+For more details (e.g., how to specify command-based probes), see the [example in the walkthrough](walkthrough/k8s201.md#health-checking), the [standalone example](liveness/), and the [documentation](pod-states.md#container-probes). ## Lifecycle hooks and termination notice Of course, nodes and applications may fail at any time, but many applications benefit from clean shutdown, such as to complete in-flight requests, when the termination of the application is deliberate. To support such cases, Kubernetes supports two kinds of notifications: Kubernetes will send SIGTERM to applications, which can be handled in order to effect graceful termination. SIGKILL is sent 10 seconds later if the application does not terminate sooner. -Kubernetes supports the (optional) specification of a [*pre-stop lifecycle hook*](../../docs/container-environment.md#container-hooks), which will execute prior to sending SIGTERM. +Kubernetes supports the (optional) specification of a [*pre-stop lifecycle hook*](container-environment.md#container-hooks), which will execute prior to sending SIGTERM. The specification of a pre-stop hook is similar to that of probes, but without the timing-related parameters. For example: ```yaml @@ -301,7 +301,7 @@ spec: ## Termination message -In order to achieve a reasonably high level of availability, especially for actively developed applications, it’s important to debug failures quickly. Kubernetes can speed debugging by surfacing causes of fatal errors in a way that can be display using [`kubectl`](kubectl/kubectl.md) or the [UI](../../docs/ui.md), in addition to general [log collection](../../docs/logging.md). It is possible to specify a `terminationMessagePath` where a container will write its “death rattle”, such as assertion failure messages, stack traces, exceptions, and so on. The default path is `/dev/termination-log`. 
+In order to achieve a reasonably high level of availability, especially for actively developed applications, it’s important to debug failures quickly. Kubernetes can speed debugging by surfacing causes of fatal errors in a way that can be displayed using [`kubectl`](kubectl/kubectl.md) or the [UI](ui.md), in addition to general [log collection](logging.md). It is possible to specify a `terminationMessagePath` where a container will write its “death rattle”, such as assertion failure messages, stack traces, exceptions, and so on. The default path is `/dev/termination-log`. Here is a toy example: ```yaml diff --git a/docs/user-guide/replication-controller.md b/docs/user-guide/replication-controller.md index a754059b72..90a3fa4f4b 100644 --- a/docs/user-guide/replication-controller.md +++ b/docs/user-guide/replication-controller.md @@ -58,7 +58,7 @@ Note that replication controllers may themselves have labels and would generally Pods may be removed from a replication controller's target set by changing their labels. This technique may be used to remove pods from service for debugging, data recovery, etc. Pods that are removed in this way will be replaced automatically (assuming that the number of replicas is not also changed). -Similarly, deleting a replication controller does not affect the pods it created. Its `replicas` field must first be set to 0 in order to delete the pods controlled. (Note that the client tool, kubectl, provides a single operation, [stop](user-guide/kubectl/kubectl_stop.md) to delete both the replication controller and the pods it controls. However, there is no such operation in the API at the moment) +Similarly, deleting a replication controller does not affect the pods it created. Its `replicas` field must first be set to 0 in order to delete the pods controlled. (Note that the client tool, kubectl, provides a single operation, [stop](kubectl/kubectl_stop.md) to delete both the replication controller and the pods it controls. 
However, there is no such operation in the API at the moment) ## Responsibilities of the replication controller @@ -89,7 +89,7 @@ Ideally, the rolling update controller would take application readiness into acc The two replication controllers would need to create pods with at least one differentiating label, such as the image tag of the primary container of the pod, since it is typically image updates that motivate rolling updates. Rolling update is implemented in the client tool -[kubectl](user-guide/kubectl/kubectl_rolling-update.md) +[kubectl](kubectl/kubectl_rolling-update.md) ### Multiple release tracks @@ -99,5 +99,5 @@ For instance, a service might target all pods with `tier in (frontend), environm -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/replication-controller.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/replication-controller.md?pixel)]() diff --git a/docs/user-guide/resourcequota/README.md b/docs/user-guide/resourcequota/README.md index c611085332..aee76a4356 100644 --- a/docs/user-guide/resourcequota/README.md +++ b/docs/user-guide/resourcequota/README.md @@ -167,5 +167,5 @@ meet your end goal. -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/resourcequota/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/resourcequota/README.md?pixel)]() diff --git a/docs/user-guide/secrets.md b/docs/user-guide/secrets.md index 7c28d31c72..522fcc8be6 100644 --- a/docs/user-guide/secrets.md +++ b/docs/user-guide/secrets.md @@ -66,7 +66,7 @@ The automatic creation and use of API credentials can be disabled or overridden if desired. However, if all you need to do is securely access the apiserver, this is the recommended workflow. 
-See the [Service Account](service-accounts.md) documentation for more +See the [Service Account](../service-accounts.md) documentation for more information on how Service Accounts work. ### Creating a Secret Manually @@ -84,15 +84,15 @@ data: ``` The data field is a map. Its keys must match -[DNS_SUBDOMAIN](design/identifiers.md), except that leading dots are also +[DNS_SUBDOMAIN](../design/identifiers.md), except that leading dots are also allowed. The values are arbitrary data, encoded using base64. The values of username and password in the example above, before base64 encoding, are `value-1` and `value-2`, respectively, with carriage return and newline characters at the end. -Create the secret using [`kubectl create`](user-guide/kubectl/kubectl_create.md). +Create the secret using [`kubectl create`](kubectl/kubectl_create.md). Once the secret is created, you can: - - create pods that automatically use it via a [Service Account](service-accounts.md). + - create pods that automatically use it via a [Service Account](../service-accounts.md). - modify your pod specification to use the secret ### Manually specifying a Secret to be Mounted on a Pod @@ -141,7 +141,7 @@ Use of imagePullSecrets is desribed in the [images documentation](images.md#spec *This feature is planned but not implemented. See [issue 9902](https://github.com/GoogleCloudPlatform/kubernetes/issues/9902).* -You can reference manually created secrets from a [service account](service-accounts.md). +You can reference manually created secrets from a [service account](../service-accounts.md). Then, pods which use that service account will have `volumeMounts` and/or `imagePullSecrets` added to them. The secrets will be mounted at **TBD**. @@ -207,9 +207,9 @@ change, even if the secret resource is modified. To change the secret used, the original pod must be deleted, and a new pod (perhaps with an identical `PodSpec`) must be created. 
Therefore, updating a secret follows the same workflow as deploying a new container image. The `kubectl rolling-update` -command can be used ([man page](user-guide/kubectl/kubectl_rolling-update.md)). +command can be used ([man page](kubectl/kubectl_rolling-update.md)). -The [`resourceVersion`](api-conventions.md#concurrency-control-and-consistency) +The [`resourceVersion`](../api-conventions.md#concurrency-control-and-consistency) of the secret is not specified when it is referenced. Therefore, if a secret is updated at about the same time as pods are starting, then it is not defined which version of the secret will be used for the pod. It @@ -500,5 +500,5 @@ Pod level](#use-case-two-containers). -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/secrets.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/secrets.md?pixel)]() diff --git a/docs/user-guide/secrets/README.md b/docs/user-guide/secrets/README.md index 2704628637..cc0d23f6af 100644 --- a/docs/user-guide/secrets/README.md +++ b/docs/user-guide/secrets/README.md @@ -21,7 +21,7 @@ You can learn more about secrets [Here](https://github.com/GoogleCloudPlatform/k This example assumes you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting -started](../../docs/getting-started-guides/) for installation instructions for your platform. +started](../../../docs/getting-started-guides/) for installation instructions for your platform. 
## Step One: Create the secret @@ -74,5 +74,5 @@ $ kubectl logs secret-test-pod -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/secrets/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/secrets/README.md?pixel)]() diff --git a/docs/user-guide/security-context.md b/docs/user-guide/security-context.md index b101a06227..b304cec020 100644 --- a/docs/user-guide/security-context.md +++ b/docs/user-guide/security-context.md @@ -14,9 +14,9 @@ certainly want the docs that go with that version. # Security Contexts -A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. See [security context design](design/security_context.md) for more details. +A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. See [security context design](../design/security_context.md) for more details. -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/security-context.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/security-context.md?pixel)]() diff --git a/docs/user-guide/services.md b/docs/user-guide/services.md index 897811a04b..808538944b 100644 --- a/docs/user-guide/services.md +++ b/docs/user-guide/services.md @@ -299,7 +299,7 @@ variables will not be populated. DNS does not have this restriction. ### DNS An optional (though strongly recommended) [cluster -add-on](../cluster/addons/README.md) is a DNS server. The +add-on](../../cluster/addons/README.md) is a DNS server. The DNS server watches the Kubernetes API for new `Services` and creates a set of DNS records for each. If DNS has been enabled throughout the cluster then all `Pods` should be able to do name resolution of `Services` automatically. @@ -508,5 +508,5 @@ of which `Pods` they are actually accessing. 
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/services.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/services.md?pixel)]() diff --git a/docs/user-guide/sharing-clusters.md b/docs/user-guide/sharing-clusters.md index a9b809b055..48e7c22e92 100644 --- a/docs/user-guide/sharing-clusters.md +++ b/docs/user-guide/sharing-clusters.md @@ -120,5 +120,5 @@ Detailed examples and explanation of `kubeconfig` loading/merging rules can be f -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/sharing-clusters.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/sharing-clusters.md?pixel)]() diff --git a/docs/user-guide/simple-nginx.md b/docs/user-guide/simple-nginx.md index fbdca24afc..351cbd735a 100644 --- a/docs/user-guide/simple-nginx.md +++ b/docs/user-guide/simple-nginx.md @@ -14,7 +14,7 @@ certainly want the docs that go with that version. ## Running your first containers in Kubernetes -Ok, you've run one of the [getting started guides](../docs/getting-started-guides/) and you have +Ok, you've run one of the [getting started guides](../../docs/getting-started-guides/) and you have successfully turned up a Kubernetes cluster. Now what? This guide will help you get oriented to Kubernetes and running your first containers on the cluster. @@ -22,7 +22,7 @@ to Kubernetes and running your first containers on the cluster. From this point onwards, it is assumed that `kubectl` is on your path from one of the getting started guides. -The [`kubectl run`](../docs/user-guide/kubectl/kubectl_run.md) line below will create two [nginx](https://registry.hub.docker.com/_/nginx/) [pods](../docs/pods.md) listening on port 80. It will also create a [replication controller](../docs/replication-controller.md) named `my-nginx` to ensure that there are always two pods running. 
+The [`kubectl run`](kubectl/kubectl_run.md) line below will create two [nginx](https://registry.hub.docker.com/_/nginx/) [pods](pods.md) listening on port 80. It will also create a [replication controller](replication-controller.md) named `my-nginx` to ensure that there are always two pods running. ```bash kubectl run my-nginx --image=nginx --replicas=2 --port=80 @@ -44,7 +44,7 @@ kubectl stop rc my-nginx ``` ### Exposing your pods to the internet. -On some platforms (for example Google Compute Engine) the kubectl command can integrate with your cloud provider to add a [public IP address](../docs/services.md#external-services) for the pods, +On some platforms (for example Google Compute Engine) the kubectl command can integrate with your cloud provider to add a [public IP address](services.md#external-services) for the pods, to do this run: ```bash @@ -65,5 +65,5 @@ is given in a different document. -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/simple-nginx.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/simple-nginx.md?pixel)]() diff --git a/docs/user-guide/simple-yaml.md b/docs/user-guide/simple-yaml.md index ca7acd3796..8954bac5c4 100644 --- a/docs/user-guide/simple-yaml.md +++ b/docs/user-guide/simple-yaml.md @@ -56,7 +56,7 @@ kubectl delete pods nginx ``` ### Running a replicated set of containers from a configuration file -To run replicated containers, you need a [Replication Controller](../docs/replication-controller.md). +To run replicated containers, you need a [Replication Controller](replication-controller.md). A replication controller is responsible for ensuring that a specific number of pods exist in the cluster. 
@@ -96,5 +96,5 @@ kubectl delete rc nginx -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/simple-yaml.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/simple-yaml.md?pixel)]() diff --git a/docs/user-guide/ui.md b/docs/user-guide/ui.md index fddd25b057..e93adbe0b2 100644 --- a/docs/user-guide/ui.md +++ b/docs/user-guide/ui.md @@ -23,7 +23,7 @@ If you find that you're not able to access the UI, it may be because the kube-ui kubectl create -f cluster/addons/kube-ui/kube-ui-rc.yaml --namespace=kube-system kubectl create -f cluster/addons/kube-ui/kube-ui-svc.yaml --namespace=kube-system ``` -Normally, this should be taken care of automatically by the [`kube-addons.sh`](../cluster/saltbase/salt/kube-addons/kube-addons.sh) script that runs on the master. +Normally, this should be taken care of automatically by the [`kube-addons.sh`](../../cluster/saltbase/salt/kube-addons/kube-addons.sh) script that runs on the master. ## Using the UI The Kubernetes UI can be used to introspect your current cluster, such as checking how resources are used, or looking at error messages. You cannot, however, use the UI to modify your cluster. @@ -50,9 +50,9 @@ Other views (Pods, Nodes, Replication Controllers, Services, and Events) simply ![kubernetes UI - Nodes](k8s-ui-nodes.png) ## More Information -For more information, see the [Kubernetes UI development document](../www/README.md) in the www directory. +For more information, see the [Kubernetes UI development document](../../www/README.md) in the www directory. 
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/ui.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/ui.md?pixel)]() diff --git a/docs/user-guide/update-demo/README.md b/docs/user-guide/update-demo/README.md index aa38cb26e3..59238b6f78 100644 --- a/docs/user-guide/update-demo/README.md +++ b/docs/user-guide/update-demo/README.md @@ -29,11 +29,11 @@ limitations under the License. --> # Live update example -This example demonstrates the usage of Kubernetes to perform a live update on a running group of [pods](../../docs/pods.md). +This example demonstrates the usage of Kubernetes to perform a live update on a running group of [pods](../../../docs/user-guide/pods.md). ### Step Zero: Prerequisites -This example assumes that you have forked the repository and [turned up a Kubernetes cluster](../../docs/getting-started-guides/): +This example assumes that you have forked the repository and [turned up a Kubernetes cluster](../../../docs/getting-started-guides/): ```bash $ cd kubernetes @@ -82,7 +82,7 @@ $ kubectl rolling-update update-demo-nautilus --update-period=10s -f examples/up ``` The rolling-update command in kubectl will do 2 things: -1. Create a new [replication controller](../../docs/replication-controller.md) with a pod template that uses the new image (`gcr.io/google_containers/update-demo:kitten`) +1. Create a new [replication controller](../../../docs/user-guide/replication-controller.md) with a pod template that uses the new image (`gcr.io/google_containers/update-demo:kitten`) 2. Scale the old and new replication controllers until the new controller replaces the old. This will kill the current pods one at a time, spinnning up new ones to replace them. Watch the [demo website](http://localhost:8001/static/index.html), it will update one pod every 10 seconds until all of the pods have the new image. @@ -133,5 +133,5 @@ Note that the images included here are public domain. 
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/update-demo/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/update-demo/README.md?pixel)]() diff --git a/docs/user-guide/user-guide.md b/docs/user-guide/user-guide.md index a22a486e3f..9ceba011ac 100644 --- a/docs/user-guide/user-guide.md +++ b/docs/user-guide/user-guide.md @@ -16,8 +16,8 @@ certainly want the docs that go with that version. The user guide is intended for anyone who wants to run programs and services on an existing Kubernetes cluster. Setup and administration of a -Kubernetes cluster is described in the [Cluster Admin Guide](admin/README.md). -The [Developer Guide](developer-guide.md) is for anyone wanting to either write code which directly accesses the +Kubernetes cluster is described in the [Cluster Admin Guide](../admin/README.md). +The [Developer Guide](../developer-guide.md) is for anyone wanting to either write code which directly accesses the kubernetes API, or to contribute directly to the kubernetes project. ## Primary concepts @@ -25,7 +25,7 @@ kubernetes API, or to contribute directly to the kubernetes project. * **Overview** ([overview.md](overview.md)): A brief overview of Kubernetes concepts. -* **Nodes** ([admin/node.md](admin/node.md)): A node is a worker machine in Kubernetes. +* **Nodes** ([docs/admin/node.md](../admin/node.md)): A node is a worker machine in Kubernetes. * **Pods** ([pods.md](pods.md)): A pod is a tightly-coupled group of containers with shared volumes. @@ -54,22 +54,22 @@ kubernetes API, or to contribute directly to the kubernetes project. 
* **Accessing the API and other cluster services via a Proxy** [accessing-the-cluster.md](accessing-the-cluster.md) -* **API Overview** ([api.md](api.md)): Pointers to API documentation on various topics +* **API Overview** ([docs/api.md](../api.md)): Pointers to API documentation on various topics and explanation of Kubernetes's approaches to API changes and API versioning. * **Kubernetes Web Interface** ([ui.md](ui.md)): Accessing the Kubernetes web user interface. -* **Kubectl Command Line Interface** ([user-guide/kubectl/kubectl.md](user-guide/kubectl/kubectl.md)): +* **Kubectl Command Line Interface** ([kubectl/kubectl.md](kubectl/kubectl.md)): The `kubectl` command line reference. * **Sharing Cluster Access** ([sharing-clusters.md](sharing-clusters.md)): How to share client credentials for a kubernetes cluster. -* **Roadmap** ([roadmap.md](roadmap.md)): The set of supported use cases, features, +* **Roadmap** ([docs/roadmap.md](../roadmap.md)): The set of supported use cases, features, docs, and patterns that are required before Kubernetes 1.0. -* **Glossary** ([glossary.md](glossary.md)): Terms and concepts. +* **Glossary** ([docs/glossary.md](../glossary.md)): Terms and concepts. ## Further reading -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/user-guide.md?pixel)]() diff --git a/docs/user-guide/volumes.md b/docs/user-guide/volumes.md index f314ac8cd7..54723054bc 100644 --- a/docs/user-guide/volumes.md +++ b/docs/user-guide/volumes.md @@ -266,9 +266,9 @@ writers simultaneously. __Important: You must have your own NFS server running with the share exported before you can use it__ -See the [NFS example](../examples/nfs/) for more details. +See the [NFS example](../../examples/nfs/) for more details. 
-For example, [this file](../examples/nfs/nfs-web-pod.yaml) demonstrates how to +For example, [this file](../../examples/nfs/nfs-web-pod.yaml) demonstrates how to specify the usage of an NFS volume within a pod. In this example one can see that a `volumeMount` called "nfs" is being mounted @@ -294,7 +294,7 @@ and then serve it in parallel from as many pods as you need. Unfortunately, iSCSI volumes can only be mounted by a single consumer in read-write mode - no simultaneous readers allowed. -See the [iSCSI example](../examples/iscsi/) for more details. +See the [iSCSI example](../../examples/iscsi/) for more details. ### glusterfs @@ -309,7 +309,7 @@ simultaneously. __Important: You must have your own GlusterFS installation running before you can use it__ -See the [GlusterFS example](../examples/glusterfs/) for more details. +See the [GlusterFS example](../../examples/glusterfs/) for more details. ### rbd @@ -329,7 +329,7 @@ and then serve it in parallel from as many pods as you need. Unfortunately, RBD volumes can only be mounted by a single consumer in read-write mode - no simultaneous readers allowed. -See the [RBD example](../examples/rbd/) for more details. +See the [RBD example](../../examples/rbd/) for more details. ### gitRepo @@ -358,7 +358,7 @@ A `persistentVolumeClaim` volume is used to mount a way for users to "claim" durable storage (such as a GCE PersistentDisk or an iSCSI volume) without knowing the details of the particular cloud environment. -See the [PersistentVolumes example](../examples/persistent-volumes/) for more +See the [PersistentVolumes example](persistent-volumes/) for more details. ## Resources @@ -376,5 +376,5 @@ several media types. 
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/volumes.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/volumes.md?pixel)]() diff --git a/docs/user-guide/walkthrough/README.md b/docs/user-guide/walkthrough/README.md index 48766a369a..ad389c6ef4 100644 --- a/docs/user-guide/walkthrough/README.md +++ b/docs/user-guide/walkthrough/README.md @@ -17,7 +17,7 @@ certainly want the docs that go with that version. ## Pods The first atom of Kubernetes is a _pod_. A pod is a collection of containers that are symbiotically grouped. -See [pods](../../docs/pods.md) for more details. +See [pods](../../../docs/user-guide/pods.md) for more details. ### Intro @@ -36,7 +36,7 @@ spec: A pod definition is a declaration of a _desired state_. Desired state is a very important concept in the Kubernetes model. Many things present a desired state to the system, and it is Kubernetes' responsibility to make sure that the current state matches the desired state. For example, when you create a Pod, you declare that you want the containers in it to be running. If the containers happen to not be running (e.g. program failure, ...), Kubernetes will continue to (re-)create them for you in order to drive them to the desired state. This process continues until you delete the Pod. -See the [design document](../../DESIGN.md) for more details. +See the [design document](../../../DESIGN.md) for more details. ### Volumes @@ -80,7 +80,7 @@ In Kubernetes, ```emptyDir``` Volumes live for the lifespan of the Pod, which is If you want to mount a directory that already exists in the file system (e.g. ```/var/logs```) you can use the ```hostPath``` directive. -See [volumes](../../docs/volumes.md) for more details. +See [volumes](../../../docs/user-guide/volumes.md) for more details. ### Multiple Containers @@ -123,9 +123,9 @@ Finally, we have also introduced an environment variable to the ```git-monitor`` ### What's next? 
Continue on to [Kubernetes 201](k8s201.md) or -for a complete application see the [guestbook example](../guestbook/README.md) +for a complete application see the [guestbook example](../../../examples/guestbook/README.md) -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/walkthrough/README.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/walkthrough/README.md?pixel)]() diff --git a/docs/user-guide/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md index 651cea3350..d2c05e159e 100644 --- a/docs/user-guide/walkthrough/k8s201.md +++ b/docs/user-guide/walkthrough/k8s201.md @@ -26,13 +26,13 @@ Having already learned about Pods and how to create them, you may be struck by a kubectl get pods -l name=nginx ``` -Lists all pods who name label matches 'nginx'. Labels are discussed in detail [elsewhere](../../docs/labels.md), but they are a core concept for two additional building blocks for Kubernetes, Replication Controllers and Services +Lists all pods whose name label matches 'nginx'. Labels are discussed in detail [elsewhere](../../../docs/user-guide/labels.md), but they are a core concept for two additional building blocks for Kubernetes, Replication Controllers and Services ### Replication Controllers OK, now you have an awesome, multi-container, labelled pod and you want to use it to build an application, you might be tempted to just start building a whole bunch of individual pods, but if you do that, a whole host of operational concerns pop up. For example: how will you scale the number of pods up or down and how will you ensure that all pods are homogenous? -Replication controllers are the objects to answer these questions. A replication controller combines a template for pod creation (a "cookie-cutter" if you will) and a number of desired replicas, into a single Kubernetes object. 
The replication controller also contains a label selector that identifies the set of objects managed by the replication controller. The replication controller constantly measures the size of this set relative to the desired size, and takes action by creating or deleting pods. The design of replication controllers is discussed in detail [elsewhere](../../docs/replication-controller.md). +Replication controllers are the objects to answer these questions. A replication controller combines a template for pod creation (a "cookie-cutter" if you will) and a number of desired replicas, into a single Kubernetes object. The replication controller also contains a label selector that identifies the set of objects managed by the replication controller. The replication controller constantly measures the size of this set relative to the desired size, and takes action by creating or deleting pods. The design of replication controllers is discussed in detail [elsewhere](../../../docs/user-guide/replication-controller.md). An example replication controller that instantiates two pods running nginx looks like: ```yaml @@ -85,7 +85,7 @@ spec: name: nginx ``` -When created, each service is assigned a unique IP address. This address is tied to the lifespan of the Service, and will not change while the Service is alive. Pods can be configured to talk to the service, and know that communication to the service will be automatically load-balanced out to some pod that is a member of the set identified by the label selector in the Service. Services are described in detail [elsewhere](../../docs/services.md). +When created, each service is assigned a unique IP address. This address is tied to the lifespan of the Service, and will not change while the Service is alive. Pods can be configured to talk to the service, and know that communication to the service will be automatically load-balanced out to some pod that is a member of the set identified by the label selector in the Service. 
Services are described in detail [elsewhere](../../../docs/user-guide/services.md). ### Health Checking When I write code it never crashes, right? Sadly the [kubernetes issues list](https://github.com/GoogleCloudPlatform/kubernetes/issues) indicates otherwise... @@ -162,9 +162,9 @@ spec: ``` ### What's next? -For a complete application see the [guestbook example](../guestbook/). +For a complete application see the [guestbook example](../../../examples/guestbook/). -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/walkthrough/k8s201.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/walkthrough/k8s201.md?pixel)]() diff --git a/docs/user-guide/working-with-resources.md b/docs/user-guide/working-with-resources.md index bb77e03361..a7dc24a35c 100644 --- a/docs/user-guide/working-with-resources.md +++ b/docs/user-guide/working-with-resources.md @@ -18,8 +18,8 @@ certainly want the docs that go with that version. and who want to learn more about using kubectl to manage resources such as pods and services. Users who want to access the REST API directly, and developers who want to extend the kubernetes API should -refer to the [api conventions](api-conventions.md) and -the [api document](api.md).* +refer to the [api conventions](../api-conventions.md) and +the [api document](../api.md).* ## Resources are Automatically Modified When you create a resource such as pod, and then retrieve the created @@ -50,9 +50,9 @@ The resource we posted had only 9 lines, but the one we got back had 51 lines. If you `diff original.yaml current.yaml`, you can see the fields added to the pod. The system adds fields in several ways: - Some fields are added synchronously with creation of the resource and some are set asynchronously. - - For example: `metadata.uid` is set synchronously. (Read more about [metadata](api-conventions.md#metadata)). 
- - For example, `status.hostIP` is set only after the pod has been scheduled. This often happens fast, but you may notice pods which do not have this set yet. This is called Late Initialization. (Read mode about [status](api-conventions.md#spec-and-status) and [late initialization](api-conventions.md#late-initialization) ). - - Some fields are set to default values. Some defaults vary by cluster and some are fixed for the API at a certain version. (Read more about [defaulting](api-conventions.md#defaulting)). + - For example: `metadata.uid` is set synchronously. (Read more about [metadata](../api-conventions.md#metadata)). + - For example, `status.hostIP` is set only after the pod has been scheduled. This often happens fast, but you may notice pods which do not have this set yet. This is called Late Initialization. (Read more about [status](../api-conventions.md#spec-and-status) and [late initialization](../api-conventions.md#late-initialization) ). + - Some fields are set to default values. Some defaults vary by cluster and some are fixed for the API at a certain version. (Read more about [defaulting](../api-conventions.md#defaulting)). - For example, `spec.containers.imagePullPolicy` always defaults to `IfNotPresent` in api v1. - For example, `spec.containers.resources.limits.cpu` may be defaulted to `100m` on some clusters, to some other value on others, and not defaulted at all on others. The API will generally not modify fields that you have set; it just sets ones which were unspecified. 
@@ -72,5 +72,5 @@ Once there: -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/working-with-resources.md?pixel)]() +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/working-with-resources.md?pixel)]() diff --git a/examples/cassandra/README.md b/examples/cassandra/README.md index 2e75b9ed5e..e9eebd5680 100644 --- a/examples/cassandra/README.md +++ b/examples/cassandra/README.md @@ -27,7 +27,7 @@ This example also has a few code and configuration files needed. To avoid typin This is a somewhat long tutorial. If you want to jump straight to the "do it now" commands, please see the [tl; dr](#tl-dr) at the end. ### Simple Single Pod Cassandra Node -In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. +In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. In this simple case, we define a single container running Cassandra for our pod: ```yaml @@ -75,7 +75,7 @@ You may also note that we are setting some Cassandra parameters (```MAX_HEAP_SIZ In theory could create a single Cassandra pod right now but since `KubernetesSeedProvider` needs to learn what nodes are in the Cassandra deployment we need to create a service first. ### Cassandra Service -In Kubernetes a _[Service](../../docs/services.md)_ describes a set of Pods that perform the same task. For example, the set of Pods in a Cassandra cluster can be a Kubernetes Service, or even just the single Pod we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set of Pods. 
But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API. This is the way that we use initially use Services with Cassandra. +In Kubernetes a _[Service](../../docs/user-guide/services.md)_ describes a set of Pods that perform the same task. For example, the set of Pods in a Cassandra cluster can be a Kubernetes Service, or even just the single Pod we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set of Pods. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API. This is the way that we initially use Services with Cassandra. Here is the service description: ```yaml @@ -145,7 +145,7 @@ subsets: ### Adding replicated nodes Of course, a single node cluster isn't particularly interesting. The real power of Kubernetes and Cassandra lies in easily building a replicated, scalable Cassandra cluster. -In Kubernetes a _[Replication Controller](../../docs/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of it's set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with it's desired state. +In Kubernetes a _[Replication Controller](../../docs/user-guide/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state. 
Replication controllers will "adopt" existing pods that match their selector query, so let's create a replication controller with a single replica to adopt our existing Cassandra pod. diff --git a/examples/cluster-dns/README.md b/examples/cluster-dns/README.md index 4fb37cead8..82ace74099 100644 --- a/examples/cluster-dns/README.md +++ b/examples/cluster-dns/README.md @@ -27,7 +27,7 @@ $ hack/dev-build-and-up.sh ### Step One: Create two namespaces -We'll see how cluster DNS works across multiple [namespaces](../../docs/namespaces.md), first we need to create two namespaces: +We'll see how cluster DNS works across multiple [namespaces](../../docs/user-guide/namespaces.md), first we need to create two namespaces: ```shell $ kubectl create -f examples/cluster-dns/namespace-dev.yaml @@ -55,7 +55,7 @@ You can view your cluster name and user name in kubernetes config at ~/.kube/con ### Step Two: Create backend replication controller in each namespace -Use the file [`examples/cluster-dns/dns-backend-rc.yaml`](dns-backend-rc.yaml) to create a backend server [replication controller](../../docs/replication-controller.md) in each namespace. +Use the file [`examples/cluster-dns/dns-backend-rc.yaml`](dns-backend-rc.yaml) to create a backend server [replication controller](../../docs/user-guide/replication-controller.md) in each namespace. ```shell $ kubectl config use-context dev @@ -83,7 +83,7 @@ dns-backend dns-backend ddysher/dns-backend name=dns-backend 1 ### Step Three: Create backend service Use the file [`examples/cluster-dns/dns-backend-service.yaml`](dns-backend-service.yaml) to create -a [service](../../docs/services.md) for the backend server. +a [service](../../docs/user-guide/services.md) for the backend server. 
```shell $ kubectl config use-context dev @@ -110,7 +110,7 @@ dns-backend name=dns-backend 10.0.35.246 8000/TCP ### Step Four: Create client pod in one namespace -Use the file [`examples/cluster-dns/dns-frontend-pod.yaml`](dns-frontend-pod.yaml) to create a client [pod](../../docs/pods.md) in dev namespace. The client pod will make a connection to backend and exit. Specifically, it tries to connect to address `http://dns-backend.development.cluster.local:8000`. +Use the file [`examples/cluster-dns/dns-frontend-pod.yaml`](dns-frontend-pod.yaml) to create a client [pod](../../docs/user-guide/pods.md) in dev namespace. The client pod will make a connection to backend and exit. Specifically, it tries to connect to address `http://dns-backend.development.cluster.local:8000`. ```shell $ kubectl config use-context dev diff --git a/examples/elasticsearch/README.md b/examples/elasticsearch/README.md index 7b7972b8a6..3958e0e49a 100644 --- a/examples/elasticsearch/README.md +++ b/examples/elasticsearch/README.md @@ -17,14 +17,14 @@ certainly want the docs that go with that version. This directory contains the source for a Docker image that creates an instance of [Elasticsearch](https://www.elastic.co/products/elasticsearch) 1.5.2 which can be used to automatically form clusters when used -with [replication controllers](../../docs/replication-controller.md). This will not work with the library Elasticsearch image +with [replication controllers](../../docs/user-guide/replication-controller.md). This will not work with the library Elasticsearch image because multicast discovery will not find the other pod IPs needed to form a cluster. This -image detects other Elasticsearch [pods](../../docs/pods.md) running in a specified [namespace](../../docs/namespaces.md) with a given +image detects other Elasticsearch [pods](../../docs/user-guide/pods.md) running in a specified [namespace](../../docs/user-guide/namespaces.md) with a given label selector. 
The detected instances are used to form a list of peer hosts which are used as part of the unicast discovery mechansim for Elasticsearch. The detection of the peer nodes is done by a program which communicates with the Kubernetes API server to get a list of matching Elasticsearch pods. To enable authenticated -communication this image needs a [secret](../../docs/secrets.md) to be mounted at `/etc/apiserver-secret` +communication this image needs a [secret](../../docs/user-guide/secrets.md) to be mounted at `/etc/apiserver-secret` with the basic authentication username and password. Here is an example replication controller specification that creates 4 instances of Elasticsearch which is in the file @@ -127,7 +127,7 @@ $ kubectl create -f music-rc.yaml --namespace=mytunes replicationcontrollers/music-db ``` -It's also useful to have a [service](../../docs/services.md) with an load balancer for accessing the Elasticsearch +It's also useful to have a [service](../../docs/user-guide/services.md) with an load balancer for accessing the Elasticsearch cluster which can be found in the file [music-service.yaml](music-service.yaml). ``` apiVersion: v1 diff --git a/examples/guestbook-go/README.md b/examples/guestbook-go/README.md index 4e5e6fd3c3..109bbdbd22 100644 --- a/examples/guestbook-go/README.md +++ b/examples/guestbook-go/README.md @@ -37,7 +37,7 @@ This example assumes that you have a working cluster. See the [Getting Started G ### Step One: Create the Redis master pod -Use the `examples/guestbook-go/redis-master-controller.json` file to create a [replication controller](../../docs/replication-controller.md) and Redis master [pod](../../docs/pods.md). The pod runs a Redis key-value server in a container. Using a replication controller is the preferred way to launch long-running pods, even for 1 replica, so that the pod benefits from the self-healing mechanism in Kubernetes (keeps the pods alive). 
+Use the `examples/guestbook-go/redis-master-controller.json` file to create a [replication controller](../../docs/user-guide/replication-controller.md) and Redis master [pod](../../docs/user-guide/pods.md). The pod runs a Redis key-value server in a container. Using a replication controller is the preferred way to launch long-running pods, even for 1 replica, so that the pod benefits from the self-healing mechanism in Kubernetes (keeps the pods alive). 1. Use the [redis-master-controller.json](redis-master-controller.json) file to create the Redis master replication controller in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command: ```shell @@ -74,7 +74,7 @@ Use the `examples/guestbook-go/redis-master-controller.json` file to create a [r Note: The initial `docker pull` can take a few minutes, depending on network conditions. ### Step Two: Create the Redis master service -A Kubernetes '[service](../../docs/services.md)' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables or DNS. +A Kubernetes '[service](../../docs/user-guide/services.md)' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables or DNS. Services find the containers to load balance based on pod labels. The pod that you created in Step One has the label `app=redis` and `role=master`. The selector field of the service determines which pods will receive the traffic sent to the service. diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md index 93ca40b756..4e508a3f64 100644 --- a/examples/guestbook/README.md +++ b/examples/guestbook/README.md @@ -53,9 +53,9 @@ This example requires a running Kubernetes cluster. See the [Getting Started gu **Note**: The redis master in this example is *not* highly available. 
Making it highly available would be an interesting, but intricate exercise— redis doesn't actually support multi-master deployments at this point in time, so high availability would be a somewhat tricky thing to implement, and might involve periodic serialization to disk, and so on. -To start the redis master, use the file `examples/guestbook/redis-master-controller.yaml`, which describes a single [pod](../../docs/pods.md) running a redis key-value server in a container. +To start the redis master, use the file `examples/guestbook/redis-master-controller.yaml`, which describes a single [pod](../../docs/user-guide/pods.md) running a redis key-value server in a container. -Although we have a single instance of our redis master, we are using a [replication controller](../../docs/replication-controller.md) to enforce that exactly one pod keeps running. E.g., if the node were to go down, the replication controller will ensure that the redis master gets restarted on a healthy node. (In our simplified example, this could result in data loss.) +Although we have a single instance of our redis master, we are using a [replication controller](../../docs/user-guide/replication-controller.md) to enforce that exactly one pod keeps running. E.g., if the node were to go down, the replication controller will ensure that the redis master gets restarted on a healthy node. (In our simplified example, this could result in data loss.) Here is `redis-master-controller.yaml`: @@ -173,7 +173,7 @@ $ docker logs ### Step Two: Fire up the redis master service -A Kubernetes [service](../../docs/services.md) is a named load balancer that proxies traffic to one or more containers. This is done using the [labels](../../docs/labels.md) metadata that we defined in the `redis-master` pod above. As mentioned, we have only one redis master, but we nevertheless want to create a service for it. Why? Because it gives us a deterministic way to route to the single master using an elastic IP. 
+A Kubernetes [service](../../docs/user-guide/services.md) is a named load balancer that proxies traffic to one or more containers. This is done using the [labels](../../docs/user-guide/labels.md) metadata that we defined in the `redis-master` pod above. As mentioned, we have only one redis master, but we nevertheless want to create a service for it. Why? Because it gives us a deterministic way to route to the single master using an elastic IP. Services find the pods to load balance based on the pods' labels. The pod that you created in [Step One](#step-one-start-up-the-redis-master) has the label `name=redis-master`. diff --git a/examples/hazelcast/README.md b/examples/hazelcast/README.md index d677cf279d..83c3d49ea2 100644 --- a/examples/hazelcast/README.md +++ b/examples/hazelcast/README.md @@ -34,13 +34,13 @@ Source is freely available at: * Docker Trusted Build - https://quay.io/repository/pires/hazelcast-kubernetes ### Simple Single Pod Hazelcast Node -In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. +In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. In this case, we shall not run a single Hazelcast pod, because the discovery mechanism now relies on a service definition. ### Adding a Hazelcast Service -In Kubernetes a _[Service](../../docs/services.md)_ describes a set of Pods that perform the same task. For example, the set of nodes in a Hazelcast cluster. An important use for a Service is to create a load balancer which distributes traffic across members of the set. 
But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods available via the Kubernetes API. This is actually how our discovery mechanism works, by relying on the service to discover other Hazelcast pods. +In Kubernetes a _[Service](../../docs/user-guide/services.md)_ describes a set of Pods that perform the same task. For example, the set of nodes in a Hazelcast cluster. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods available via the Kubernetes API. This is actually how our discovery mechanism works, by relying on the service to discover other Hazelcast pods. Here is the service description: ```yaml @@ -67,7 +67,7 @@ $ kubectl create -f hazelcast-service.yaml ### Adding replicated nodes The real power of Kubernetes and Hazelcast lies in easily building a replicated, resizable Hazelcast cluster. -In Kubernetes a _[Replication Controller](../../docs/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of it's set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with it's desired state. +In Kubernetes a _[Replication Controller](../../docs/user-guide/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state. Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Hazelcast Pod. 
diff --git a/examples/kubernetes-namespaces/README.md b/examples/kubernetes-namespaces/README.md index d0ba4172eb..194c4519c1 100644 --- a/examples/kubernetes-namespaces/README.md +++ b/examples/kubernetes-namespaces/README.md @@ -14,11 +14,11 @@ certainly want the docs that go with that version. ## Kubernetes Namespaces -Kubernetes _[namespaces](../../docs/namespaces.md)_ help different projects, teams, or customers to share a Kubernetes cluster. +Kubernetes _[namespaces](../../docs/user-guide/namespaces.md)_ help different projects, teams, or customers to share a Kubernetes cluster. It does this by providing the following: -1. A scope for [Names](../../docs/identifiers.md). +1. A scope for [Names](../../docs/user-guide/identifiers.md). 2. A mechanism to attach authorization and policy to a subsection of the cluster. Use of multiple namespaces is optional. @@ -30,7 +30,7 @@ This example demonstrates how to use Kubernetes namespaces to subdivide your clu This example assumes the following: 1. You have an [existing Kubernetes cluster](../../docs/getting-started-guides/). -2. You have a basic understanding of Kubernetes _[pods](../../docs/pods.md)_, _[services](../../docs/services.md)_, and _[replication controllers](../../docs/replication-controller.md)_. +2. You have a basic understanding of Kubernetes _[pods](../../docs/user-guide/pods.md)_, _[services](../../docs/user-guide/services.md)_, and _[replication controllers](../../docs/user-guide/replication-controller.md)_. ### Step One: Understand the default namespace diff --git a/examples/meteor/README.md b/examples/meteor/README.md index 644d5b674a..0fdd86268b 100644 --- a/examples/meteor/README.md +++ b/examples/meteor/README.md @@ -25,7 +25,7 @@ Meteor uses MongoDB, and we will use the `GCEPersistentDisk` type of volume for persistent storage. Therefore, this example is only applicable to [Google Compute Engine](https://cloud.google.com/compute/). 
Take a look at the -[volumes documentation](../../docs/volumes.md) for other options. +[volumes documentation](../../docs/user-guide/volumes.md) for other options. First, if you have not already done so: @@ -118,7 +118,7 @@ and make sure the `image:` points to the container you just pushed to the Docker Hub or GCR. We will need to provide MongoDB a persistent Kuberetes volume to -store its data. See the [volumes documentation](../../docs/volumes.md) for +store its data. See the [volumes documentation](../../docs/user-guide/volumes.md) for options. We're going to use Google Compute Engine persistent disks. Create the MongoDB disk by running: ``` @@ -169,7 +169,7 @@ Here we can see the MongoDB host and port information being passed into the Meteor app. The `MONGO_SERVICE...` environment variables are set by Kubernetes, and point to the service named `mongo` specified in [`mongo-service.json`](mongo-service.json). See the [environment -documentation](../../docs/container-environment.md) for more details. +documentation](../../docs/user-guide/container-environment.md) for more details. As you may know, Meteor uses long lasting connections, and requires _sticky sessions_. With Kubernetes you can scale out your app easily @@ -177,7 +177,7 @@ with session affinity. The [`meteor-service.json`](meteor-service.json) file contains `"sessionAffinity": "ClientIP"`, which provides this for us. See the [service -documentation](../../docs/services.md#virtual-ips-and-service-proxies) for +documentation](../../docs/user-guide/services.md#virtual-ips-and-service-proxies) for more information. As mentioned above, the mongo container uses a volume which is mapped diff --git a/examples/mysql-wordpress-pd/README.md b/examples/mysql-wordpress-pd/README.md index ffeff2186c..3759d803f9 100644 --- a/examples/mysql-wordpress-pd/README.md +++ b/examples/mysql-wordpress-pd/README.md @@ -14,17 +14,17 @@ certainly want the docs that go with that version. 
# Persistent Installation of MySQL and WordPress on Kubernetes -This example describes how to run a persistent installation of [Wordpress](https://wordpress.org/) using the [volumes](../../docs/volumes.md) feature of Kubernetes, and [Google Compute Engine](https://cloud.google.com/compute/docs/disks) [persistent disks](../../docs/volumes.md#gcepersistentdisk). +This example describes how to run a persistent installation of [Wordpress](https://wordpress.org/) using the [volumes](../../docs/user-guide/volumes.md) feature of Kubernetes, and [Google Compute Engine](https://cloud.google.com/compute/docs/disks) [persistent disks](../../docs/user-guide/volumes.md#gcepersistentdisk). We'll use the [mysql](https://registry.hub.docker.com/_/mysql/) and [wordpress](https://registry.hub.docker.com/_/wordpress/) official [Docker](https://www.docker.com/) images for this installation. (The wordpress image includes an Apache server). -We'll create two Kubernetes [pods](../../docs/pods.md) to run mysql and wordpress, both with associated persistent disks, then set up a Kubernetes [service](../../docs/services.md) to front each pod. +We'll create two Kubernetes [pods](../../docs/user-guide/pods.md) to run mysql and wordpress, both with associated persistent disks, then set up a Kubernetes [service](../../docs/user-guide/services.md) to front each pod. This example demonstrates several useful things, including: how to set up and use persistent disks with Kubernetes pods; how to define Kubernetes services to leverage docker-links-compatible service environment variables; and use of an external load balancer to expose the wordpress service externally and make it transparent to the user if the wordpress pod moves to a different cluster node. ## Get started on Google Compute Engine (GCE) -Because we're using the `GCEPersistentDisk` type of volume for persistent storage, this example is only applicable to [Google Compute Engine](https://cloud.google.com/compute/). 
Take a look at the [volumes documentation](../../docs/volumes.md) for other options. +Because we're using the `GCEPersistentDisk` type of volume for persistent storage, this example is only applicable to [Google Compute Engine](https://cloud.google.com/compute/). Take a look at the [volumes documentation](../../docs/user-guide/volumes.md) for other options. First, if you have not already done so: @@ -48,7 +48,7 @@ Please see the [GCE getting started guide](../../docs/getting-started-guides/gce ## Create two persistent disks -For this WordPress installation, we're going to configure our Kubernetes [pods](../../docs/pods.md) to use [persistent disks](https://cloud.google.com/compute/docs/disks). This means that we can preserve installation state across pod shutdown and re-startup. +For this WordPress installation, we're going to configure our Kubernetes [pods](../../docs/user-guide/pods.md) to use [persistent disks](https://cloud.google.com/compute/docs/disks). This means that we can preserve installation state across pod shutdown and re-startup. You will need to create the disks in the same [GCE zone](https://cloud.google.com/compute/docs/zones) as the Kubernetes cluster. The default setup script will create the cluster in the `us-central1-b` zone, as seen in the [config-default.sh](../../cluster/gce/config-default.sh) file. Replace `$ZONE` below with the appropriate zone. @@ -137,8 +137,8 @@ If you want to do deeper troubleshooting, e.g. if it seems a container is not st ### Start the Mysql service -We'll define and start a [service](../../docs/services.md) that lets other pods access the mysql database on a known port and host. -We will specifically name the service `mysql`. This will let us leverage the support for [Docker-links-compatible](../../docs/services.md#how-do-they-work) service environment variables when we set up the wordpress pod. 
The wordpress Docker image expects to be linked to a mysql container named `mysql`, as you can see in the "How to use this image" section on the wordpress docker hub [page](https://registry.hub.docker.com/_/wordpress/). +We'll define and start a [service](../../docs/user-guide/services.md) that lets other pods access the mysql database on a known port and host. +We will specifically name the service `mysql`. This will let us leverage the support for [Docker-links-compatible](../../docs/user-guide/services.md#how-do-they-work) service environment variables when we set up the wordpress pod. The wordpress Docker image expects to be linked to a mysql container named `mysql`, as you can see in the "How to use this image" section on the wordpress docker hub [page](https://registry.hub.docker.com/_/wordpress/). So if we label our Kubernetes mysql service `mysql`, the wordpress pod will be able to use the Docker-links-compatible environment variables, defined by Kubernetes, to connect to the database. 
diff --git a/examples/phabricator/README.md b/examples/phabricator/README.md index 2b7cd45bbe..67717ff352 100644 --- a/examples/phabricator/README.md +++ b/examples/phabricator/README.md @@ -20,7 +20,7 @@ The example combines a web frontend and an external service that provides MySQL ### Step Zero: Prerequisites -This example assumes that you have a basic understanding of kubernetes [services](../../docs/services.md) and that you have forked the repository and [turned up a Kubernetes cluster](../../docs/getting-started-guides/): +This example assumes that you have a basic understanding of kubernetes [services](../../docs/user-guide/services.md) and that you have forked the repository and [turned up a Kubernetes cluster](../../docs/getting-started-guides/): ```shell $ cd kubernetes @@ -35,7 +35,7 @@ In the remaining part of this example we will assume that your instance is named ### Step Two: Turn up the phabricator -To start Phabricator server use the file [`examples/phabricator/phabricator-controller.json`](phabricator-controller.json) which describes a [replication controller](../../docs/replication-controller.md) with a single [pod](../../docs/pods.md) running an Apache server with Phabricator PHP source: +To start Phabricator server use the file [`examples/phabricator/phabricator-controller.json`](phabricator-controller.json) which describes a [replication controller](../../docs/user-guide/replication-controller.md) with a single [pod](../../docs/user-guide/pods.md) running an Apache server with Phabricator PHP source: ```js { @@ -172,7 +172,7 @@ $ kubectl create -f examples/phabricator/authenticator-controller.json ### Step Four: Turn up the phabricator service -A Kubernetes 'service' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via *environment variables*. Services find the containers to load balance based on pod labels. 
These environment variables are typically referenced in application code, shell scripts, or other places where one node needs to talk to another in a distributed system. You should catch up on [kubernetes services](../../docs/services.md) before proceeding. +A Kubernetes 'service' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via *environment variables*. Services find the containers to load balance based on pod labels. These environment variables are typically referenced in application code, shell scripts, or other places where one node needs to talk to another in a distributed system. You should catch up on [kubernetes services](../../docs/user-guide/services.md) before proceeding. The pod that you created in Step One has the label `name=phabricator`. The selector field of the service determines which pods will receive the traffic sent to the service. Since we are setting up a service for an external application we also need to request external static IP address (otherwise it will be assigned dynamically): diff --git a/examples/redis/README.md b/examples/redis/README.md index 195d49e987..f532760dab 100644 --- a/examples/redis/README.md +++ b/examples/redis/README.md @@ -23,7 +23,7 @@ This example assumes that you have a Kubernetes cluster installed and running, a This is a somewhat long tutorial. If you want to jump straight to the "do it now" commands, please see the [tl; dr](#tl-dr) at the end. ### Turning up an initial master/sentinel pod. -A [_Pod_](../../docs/pods.md) is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. +A [_Pod_](../../docs/user-guide/pods.md) is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. 
We will used the shared network namespace to bootstrap our Redis cluster. In particular, the very first sentinel needs to know how to find the master (subsequent sentinels just ask the first sentinel). Because all containers in a Pod share a network namespace, the sentinel can simply look at ```$(hostname -i):6379```. @@ -36,7 +36,7 @@ kubectl create -f examples/redis/redis-master.yaml ``` ### Turning up a sentinel service -In Kubernetes a [_Service_](../../docs/services.md) describes a set of Pods that perform the same task. For example, the set of nodes in a Cassandra cluster, or even the single node we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API. +In Kubernetes a [_Service_](../../docs/user-guide/services.md) describes a set of Pods that perform the same task. For example, the set of nodes in a Cassandra cluster, or even the single node we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API. In Redis, we will use a Kubernetes Service to provide a discoverable endpoints for the Redis sentinels in the cluster. From the sentinels Redis clients can find the master, and then the slaves and other relevant info for the cluster. This enables new members to join the cluster when failures occur. @@ -50,7 +50,7 @@ kubectl create -f examples/redis/redis-sentinel-service.yaml ### Turning up replicated redis servers So far, what we have done is pretty manual, and not very fault-tolerant. 
If the ```redis-master``` pod that we previously created is destroyed for some reason (e.g. a machine dying) our Redis service goes away with it. -In Kubernetes a [_Replication Controller_](../../docs/replication-controller.md) is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of it's set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with it's desired state. +In Kubernetes a [_Replication Controller_](../../docs/user-guide/replication-controller.md) is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state. Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Redis server. Here is the replication controller config: [redis-controller.yaml](redis-controller.yaml) diff --git a/examples/rethinkdb/README.md b/examples/rethinkdb/README.md index e873f2687e..4fdba24c7b 100644 --- a/examples/rethinkdb/README.md +++ b/examples/rethinkdb/README.md @@ -134,7 +134,7 @@ since the ui is not stateless when playing with Web Admin UI will cause `Connect **BTW** * `gen_pod.sh` is using to generate pod templates for my local cluster, -the generated pods which is using `nodeSelector` to force k8s to schedule containers to my designate nodes, for I need to access persistent data on my host dirs. 
Note that one needs to label the node before 'nodeSelector' can work, see this [tutorial](../node-selection/) +the generated pods which is using `nodeSelector` to force k8s to schedule containers to my designate nodes, for I need to access persistent data on my host dirs. Note that one needs to label the node before 'nodeSelector' can work, see this [tutorial](../../docs/user-guide/node-selection/) * see [antmanler/rethinkdb-k8s](https://github.com/antmanler/rethinkdb-k8s) for detail diff --git a/examples/spark/README.md b/examples/spark/README.md index f257876051..52cdb5da71 100644 --- a/examples/spark/README.md +++ b/examples/spark/README.md @@ -38,10 +38,10 @@ instructions for your platform. ## Step One: Start your Master service -The Master [service](../../docs/services.md) is the master (or head) service for a Spark +The Master [service](../../docs/user-guide/services.md) is the master (or head) service for a Spark cluster. -Use the [`examples/spark/spark-master.json`](spark-master.json) file to create a [pod](../../docs/pods.md) running +Use the [`examples/spark/spark-master.json`](spark-master.json) file to create a [pod](../../docs/user-guide/pods.md) running the Master service. ```shell @@ -101,7 +101,7 @@ program. The Spark workers need the Master service to be running. Use the [`examples/spark/spark-worker-controller.json`](spark-worker-controller.json) file to create a -[replication controller](../../docs/replication-controller.md) that manages the worker pods. +[replication controller](../../docs/user-guide/replication-controller.md) that manages the worker pods. ```shell $ kubectl create -f examples/spark/spark-worker-controller.json diff --git a/examples/storm/README.md b/examples/storm/README.md index e6e06b877c..5109403dda 100644 --- a/examples/storm/README.md +++ b/examples/storm/README.md @@ -41,10 +41,10 @@ instructions for your platform. 
## Step One: Start your ZooKeeper service -ZooKeeper is a distributed coordination [service](../../docs/services.md) that Storm uses as a +ZooKeeper is a distributed coordination [service](../../docs/user-guide/services.md) that Storm uses as a bootstrap and for state storage. -Use the [`examples/storm/zookeeper.json`](zookeeper.json) file to create a [pod](../../docs/pods.md) running +Use the [`examples/storm/zookeeper.json`](zookeeper.json) file to create a [pod](../../docs/user-guide/pods.md) running the ZooKeeper service. ```shell @@ -128,7 +128,7 @@ The Storm workers need both the ZooKeeper and Nimbus services to be running. Use the [`examples/storm/storm-worker-controller.json`](storm-worker-controller.json) file to create a -[replication controller](../../docs/replication-controller.md) that manages the worker pods. +[replication controller](../../docs/user-guide/replication-controller.md) that manages the worker pods. ```shell $ kubectl create -f examples/storm/storm-worker-controller.json From 2476d78ac24c379cb3fb0f958b2170e00ec1e859 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 14 Jul 2015 11:50:29 -0700 Subject: [PATCH 3/3] point tests to new doc location --- examples/examples_test.go | 37 ++++++++++--------- .../persistent_volume_claim_binder_test.go | 10 ++--- test/e2e/kubectl.go | 4 +- 3 files changed, 27 insertions(+), 24 deletions(-) diff --git a/examples/examples_test.go b/examples/examples_test.go index db02faa8e9..0e17933c16 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -160,7 +160,7 @@ func TestExampleObjectSchemas(t *testing.T) { "redis-master-service": &api.Service{}, "redis-slave-service": &api.Service{}, }, - "../examples/walkthrough": { + "../docs/user-guide/walkthrough": { "pod1": &api.Pod{}, "pod2": &api.Pod{}, "pod-with-http-healthcheck": &api.Pod{}, @@ -168,22 +168,22 @@ func TestExampleObjectSchemas(t *testing.T) { "replication-controller": &api.ReplicationController{}, "podtemplate": 
&api.PodTemplate{}, }, - "../examples/update-demo": { + "../docs/user-guide/update-demo": { "kitten-rc": &api.ReplicationController{}, "nautilus-rc": &api.ReplicationController{}, }, - "../examples/persistent-volumes/volumes": { + "../docs/user-guide/persistent-volumes/volumes": { "local-01": &api.PersistentVolume{}, "local-02": &api.PersistentVolume{}, "gce": &api.PersistentVolume{}, "nfs": &api.PersistentVolume{}, }, - "../examples/persistent-volumes/claims": { + "../docs/user-guide/persistent-volumes/claims": { "claim-01": &api.PersistentVolumeClaim{}, "claim-02": &api.PersistentVolumeClaim{}, "claim-03": &api.PersistentVolumeClaim{}, }, - "../examples/persistent-volumes/simpletest": { + "../docs/user-guide/persistent-volumes/simpletest": { "namespace": &api.Namespace{}, "pod": &api.Pod{}, "service": &api.Service{}, @@ -195,14 +195,16 @@ func TestExampleObjectSchemas(t *testing.T) { "glusterfs-pod": &api.Pod{}, "glusterfs-endpoints": &api.Endpoints{}, }, - "../examples/liveness": { + "../docs/user-guide/liveness": { "exec-liveness": &api.Pod{}, "http-liveness": &api.Pod{}, }, + "../docs/user-guide": { + "multi-pod": nil, + "pod": &api.Pod{}, + "replication": &api.ReplicationController{}, + }, "../examples": { - "multi-pod": nil, - "pod": &api.Pod{}, - "replication": &api.ReplicationController{}, "scheduler-policy-config": &schedulerapi.Policy{}, }, "../examples/rbd/secret": { @@ -231,7 +233,7 @@ func TestExampleObjectSchemas(t *testing.T) { "namespace-dev": &api.Namespace{}, "namespace-prod": &api.Namespace{}, }, - "../examples/downward-api": { + "../docs/user-guide/downward-api": { "dapi-pod": &api.Pod{}, }, "../examples/elasticsearch": { @@ -250,13 +252,13 @@ func TestExampleObjectSchemas(t *testing.T) { "namespace-dev": &api.Namespace{}, "namespace-prod": &api.Namespace{}, }, - "../examples/limitrange": { + "../docs/user-guide/limitrange": { "invalid-pod": &api.Pod{}, "limits": &api.LimitRange{}, "namespace": &api.Namespace{}, "valid-pod": &api.Pod{}, }, - 
"../examples/logging-demo": { + "../docs/user-guide/logging-demo": { "synthetic_0_25lps": &api.Pod{}, "synthetic_10lps": &api.Pod{}, }, @@ -277,7 +279,7 @@ func TestExampleObjectSchemas(t *testing.T) { "nfs-server-service": &api.Service{}, "nfs-web-pod": &api.Pod{}, }, - "../examples/node-selection": { + "../docs/user-guide/node-selection": { "pod": &api.Pod{}, }, "../examples/openshift-origin": { @@ -296,7 +298,7 @@ func TestExampleObjectSchemas(t *testing.T) { "redis-sentinel-controller": &api.ReplicationController{}, "redis-sentinel-service": &api.Service{}, }, - "../examples/resourcequota": { + "../docs/user-guide/resourcequota": { "namespace": &api.Namespace{}, "limits": &api.LimitRange{}, "quota": &api.ResourceQuota{}, @@ -307,7 +309,7 @@ func TestExampleObjectSchemas(t *testing.T) { "driver-service": &api.Service{}, "rc": &api.ReplicationController{}, }, - "../examples/secrets": { + "../docs/user-guide/secrets": { "secret-pod": &api.Pod{}, "secret": &api.Secret{}, }, @@ -362,6 +364,7 @@ func TestExampleObjectSchemas(t *testing.T) { t.Errorf("Expected no error, Got %v", err) } if tested != len(expected) { + t.Logf("failing path: %q", path) t.Errorf("Expected %d examples, Got %d", len(expected), tested) } } @@ -392,9 +395,9 @@ func TestReadme(t *testing.T) { expectedType []runtime.Object }{ {"../README.md", []runtime.Object{&api.Pod{}}}, - {"../examples/walkthrough/README.md", []runtime.Object{&api.Pod{}}}, + {"../docs/user-guide/walkthrough/README.md", []runtime.Object{&api.Pod{}}}, {"../examples/iscsi/README.md", []runtime.Object{&api.Pod{}}}, - {"../examples/simple-yaml.md", []runtime.Object{&api.Pod{}, &api.ReplicationController{}}}, + {"../docs/user-guide/simple-yaml.md", []runtime.Object{&api.Pod{}, &api.ReplicationController{}}}, } for _, path := range paths { diff --git a/pkg/volumeclaimbinder/persistent_volume_claim_binder_test.go b/pkg/volumeclaimbinder/persistent_volume_claim_binder_test.go index 0f2e106159..858d26ad66 100644 --- 
a/pkg/volumeclaimbinder/persistent_volume_claim_binder_test.go +++ b/pkg/volumeclaimbinder/persistent_volume_claim_binder_test.go @@ -115,7 +115,7 @@ func TestExampleObjects(t *testing.T) { for name, scenario := range scenarios { o := testclient.NewObjects(api.Scheme, api.Scheme) - if err := testclient.AddObjectsFromPath("../../examples/persistent-volumes/"+name, o, api.Scheme); err != nil { + if err := testclient.AddObjectsFromPath("../../docs/user-guide/persistent-volumes/"+name, o, api.Scheme); err != nil { t.Fatal(err) } @@ -172,10 +172,10 @@ func TestExampleObjects(t *testing.T) { func TestBindingWithExamples(t *testing.T) { api.ForTesting_ReferencesAllowBlankSelfLinks = true o := testclient.NewObjects(api.Scheme, api.Scheme) - if err := testclient.AddObjectsFromPath("../../examples/persistent-volumes/claims/claim-01.yaml", o, api.Scheme); err != nil { + if err := testclient.AddObjectsFromPath("../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, api.Scheme); err != nil { t.Fatal(err) } - if err := testclient.AddObjectsFromPath("../../examples/persistent-volumes/volumes/local-01.yaml", o, api.Scheme); err != nil { + if err := testclient.AddObjectsFromPath("../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, api.Scheme); err != nil { t.Fatal(err) } @@ -275,10 +275,10 @@ func TestBindingWithExamples(t *testing.T) { func TestMissingFromIndex(t *testing.T) { api.ForTesting_ReferencesAllowBlankSelfLinks = true o := testclient.NewObjects(api.Scheme, api.Scheme) - if err := testclient.AddObjectsFromPath("../../examples/persistent-volumes/claims/claim-01.yaml", o, api.Scheme); err != nil { + if err := testclient.AddObjectsFromPath("../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, api.Scheme); err != nil { t.Fatal(err) } - if err := testclient.AddObjectsFromPath("../../examples/persistent-volumes/volumes/local-01.yaml", o, api.Scheme); err != nil { + if err := 
testclient.AddObjectsFromPath("../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, api.Scheme); err != nil { t.Fatal(err) } diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index ecb7ca4e95..ea697fcd54 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -142,7 +142,7 @@ var _ = Describe("Kubectl client", func() { var podPath string BeforeEach(func() { - podPath = filepath.Join(testContext.RepoRoot, "examples/pod.yaml") + podPath = filepath.Join(testContext.RepoRoot, "docs/user-guide/pod.yaml") By("creating the pod") runKubectl("create", "-f", podPath, fmt.Sprintf("--namespace=%v", ns)) checkPodsRunningReady(c, ns, []string{simplePodName}, podStartTimeout) @@ -206,7 +206,7 @@ var _ = Describe("Kubectl client", func() { var podPath string var nsFlag string BeforeEach(func() { - podPath = filepath.Join(testContext.RepoRoot, "examples/pod.yaml") + podPath = filepath.Join(testContext.RepoRoot, "docs/user-guide/pod.yaml") By("creating the pod") nsFlag = fmt.Sprintf("--namespace=%v", ns) runKubectl("create", "-f", podPath, nsFlag)