diff --git a/cmd/mungedocs/example_syncer.go b/cmd/mungedocs/example_syncer.go
index 8df6a968e4..ba153b3c46 100644
--- a/cmd/mungedocs/example_syncer.go
+++ b/cmd/mungedocs/example_syncer.go
@@ -43,7 +43,7 @@ var exampleMungeTagRE = regexp.MustCompile(beginMungeTag(fmt.Sprintf("%s %s", ex
 // bar:
 // ```
 //
-// [Download example](../../examples/guestbook/frontend-controller.yaml)
+// [Download example](../../examples/guestbook/frontend-controller.yaml?raw=true)
 //
 func syncExamples(filePath string, mlines mungeLines) (mungeLines, error) {
 	var err error
@@ -108,7 +108,7 @@ func exampleContent(filePath, linkPath, fileType string) (mungeLines, error) {
 
 	// remove leading and trailing spaces and newlines
 	trimmedFileContent := strings.TrimSpace(string(dat))
-	content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, fileRel)
+	content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s?raw=true)", fileType, trimmedFileContent, fileRel)
 	out := getMungeLines(content)
 	return out, nil
 }
diff --git a/cmd/mungedocs/example_syncer_test.go b/cmd/mungedocs/example_syncer_test.go
index 84fd8854a1..72c1514a1f 100644
--- a/cmd/mungedocs/example_syncer_test.go
+++ b/cmd/mungedocs/example_syncer_test.go
@@ -41,11 +41,11 @@ spec:
 		{"", ""},
 		{
 			"\n\n",
-			"\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n\n",
+			"\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml?raw=true)\n\n",
 		},
 		{
 			"\n\n",
-			"\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n\n",
+			"\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml?raw=true)\n\n",
 		},
 	}
 	repoRoot = ""
diff --git a/docs/admin/namespaces/README.md b/docs/admin/namespaces/README.md
index f15c29baa0..c866b1e20d 100644
--- a/docs/admin/namespaces/README.md
+++ b/docs/admin/namespaces/README.md
@@ -98,7 +98,7 @@ Use the file [`namespace-dev.json`](namespace-dev.json) which describes a develo
 }
 ```
 
-[Download example](namespace-dev.json)
+[Download example](namespace-dev.json?raw=true)
 
 Create the development namespace using kubectl.
diff --git a/docs/getting-started-guides/logging.md b/docs/getting-started-guides/logging.md
index 95460f9de1..de308b9e84 100644
--- a/docs/getting-started-guides/logging.md
+++ b/docs/getting-started-guides/logging.md
@@ -73,7 +73,7 @@ spec:
 'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done']
 ```
 
-[Download example](../../examples/blog-logging/counter-pod.yaml)
+[Download example](../../examples/blog-logging/counter-pod.yaml?raw=true)
 
 This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let’s create the pod in the default
@@ -192,7 +192,7 @@ spec:
 path: /var/lib/docker/containers
 ```
 
-[Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml)
+[Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml?raw=true)
 
 This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it.
diff --git a/docs/user-guide/downward-api.md b/docs/user-guide/downward-api.md
index 9551ec0876..8f625bd6c8 100644
--- a/docs/user-guide/downward-api.md
+++ b/docs/user-guide/downward-api.md
@@ -108,7 +108,7 @@ spec:
 restartPolicy: Never
 ```
 
-[Download example](downward-api/dapi-pod.yaml)
+[Download example](downward-api/dapi-pod.yaml?raw=true)
 
 
@@ -178,7 +178,7 @@ spec:
 fieldPath: metadata.annotations
 ```
 
-[Download example](downward-api/volume/dapi-volume.yaml)
+[Download example](downward-api/volume/dapi-volume.yaml?raw=true)
 
 Some more thorough examples:
diff --git a/docs/user-guide/logging.md b/docs/user-guide/logging.md
index 49af4f527d..c48f9ea7c5 100644
--- a/docs/user-guide/logging.md
+++ b/docs/user-guide/logging.md
@@ -58,7 +58,7 @@ spec:
 'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done']
 ```
 
-[Download example](../../examples/blog-logging/counter-pod.yaml)
+[Download example](../../examples/blog-logging/counter-pod.yaml?raw=true)
 
 we can run the pod:
diff --git a/docs/user-guide/simple-yaml.md b/docs/user-guide/simple-yaml.md
index cf5f0c4a1f..aa1bf74342 100644
--- a/docs/user-guide/simple-yaml.md
+++ b/docs/user-guide/simple-yaml.md
@@ -64,7 +64,7 @@ spec:
 - containerPort: 80
 ```
 
-[Download example](pod.yaml)
+[Download example](pod.yaml?raw=true)
 
 You can see your cluster's pods:
@@ -116,7 +116,7 @@ spec:
 - containerPort: 80
 ```
 
-[Download example](replication.yaml)
+[Download example](replication.yaml?raw=true)
 
 To delete the replication controller (and the pods it created):
diff --git a/docs/user-guide/walkthrough/README.md b/docs/user-guide/walkthrough/README.md
index 0740e1674f..e5fe13d13c 100644
--- a/docs/user-guide/walkthrough/README.md
+++ b/docs/user-guide/walkthrough/README.md
@@ -165,7 +165,7 @@ spec:
 emptyDir: {}
 ```
 
-[Download example](pod-redis.yaml)
+[Download example](pod-redis.yaml?raw=true)
 
 Notes:
diff --git a/docs/user-guide/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md
index 24f9c1d800..229eef033d 100644
--- a/docs/user-guide/walkthrough/k8s201.md
+++ b/docs/user-guide/walkthrough/k8s201.md
@@ -86,7 +86,7 @@ spec:
 - containerPort: 80
 ```
 
-[Download example](pod-nginx-with-label.yaml)
+[Download example](pod-nginx-with-label.yaml?raw=true)
 
 Create the labeled pod ([pod-nginx-with-label.yaml](pod-nginx-with-label.yaml)):
@@ -142,7 +142,7 @@ spec:
 - containerPort: 80
 ```
 
-[Download example](replication-controller.yaml)
+[Download example](replication-controller.yaml?raw=true)
 
 #### Replication Controller Management
@@ -195,7 +195,7 @@ spec:
 app: nginx
 ```
 
-[Download example](service.yaml)
+[Download example](service.yaml?raw=true)
 
 #### Service Management
@@ -311,7 +311,7 @@ spec:
 - containerPort: 80
 ```
 
-[Download example](pod-with-http-healthcheck.yaml)
+[Download example](pod-with-http-healthcheck.yaml?raw=true)
 
 For more information about health checking, see [Container Probes](../pod-states.md#container-probes).
diff --git a/examples/cassandra/README.md b/examples/cassandra/README.md
index ff51ddd499..7adb5c84ae 100644
--- a/examples/cassandra/README.md
+++ b/examples/cassandra/README.md
@@ -100,7 +100,7 @@ spec:
 emptyDir: {}
 ```
 
-[Download example](cassandra-controller.yaml)
+[Download example](cassandra-controller.yaml?raw=true)
 
 There are a few things to note in this description. First is that we are running the ```kubernetes/cassandra``` image. This is a standard Cassandra installation on top of Debian. However it also adds a custom [```SeedProvider```](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java) to Cassandra. In Cassandra, a ```SeedProvider``` bootstraps the gossip protocol that Cassandra uses to find other nodes. The ```KubernetesSeedProvider``` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later)
@@ -131,7 +131,7 @@ spec:
 name: cassandra
 ```
 
-[Download example](cassandra-service.yaml)
+[Download example](cassandra-service.yaml?raw=true)
 
 The important thing to note here is the ```selector```. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is ```name=cassandra```. If you look back at the Pod specification above, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service.
@@ -241,7 +241,7 @@ spec:
 emptyDir: {}
 ```
 
-[Download example](cassandra-controller.yaml)
+[Download example](cassandra-controller.yaml?raw=true)
 
 Most of this replication controller definition is identical to the Cassandra pod definition above, it simply gives the replication controller a recipe to use when it creates new Cassandra pods. The other differentiating parts are the ```selector``` attribute which contains the controller's selector query, and the ```replicas``` attribute which specifies the desired number of replicas, in this case 1.
diff --git a/examples/celery-rabbitmq/README.md b/examples/celery-rabbitmq/README.md
index 04cc646a53..cc2a4db85c 100644
--- a/examples/celery-rabbitmq/README.md
+++ b/examples/celery-rabbitmq/README.md
@@ -81,7 +81,7 @@ spec:
 component: rabbitmq
 ```
 
-[Download example](rabbitmq-service.yaml)
+[Download example](rabbitmq-service.yaml?raw=true)
 
 To start the service, run:
@@ -126,7 +126,7 @@ spec:
 cpu: 100m
 ```
 
-[Download example](rabbitmq-controller.yaml)
+[Download example](rabbitmq-controller.yaml?raw=true)
 
 Running `$ kubectl create -f examples/celery-rabbitmq/rabbitmq-controller.yaml` brings up a replication controller that ensures one pod exists which is running a RabbitMQ instance.
@@ -167,7 +167,7 @@ spec:
 cpu: 100m
 ```
 
-[Download example](celery-controller.yaml)
+[Download example](celery-controller.yaml?raw=true)
 
 There are several things to point out here...
@@ -238,7 +238,7 @@ spec:
 type: LoadBalancer
 ```
 
-[Download example](flower-service.yaml)
+[Download example](flower-service.yaml?raw=true)
 
 It is marked as external (LoadBalanced). However on many platforms you will have to add an explicit firewall rule to open port 5555.
@@ -279,7 +279,7 @@ spec:
 cpu: 100m
 ```
 
-[Download example](flower-controller.yaml)
+[Download example](flower-controller.yaml?raw=true)
 
 This will bring up a new pod with Flower installed and port 5555 (Flower's default port) exposed through the service endpoint. This image uses the following command to start Flower:
diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md
index f6e7102370..40ae072560 100644
--- a/examples/guestbook/README.md
+++ b/examples/guestbook/README.md
@@ -100,7 +100,7 @@ spec:
 - containerPort: 6379
 ```
 
-[Download example](redis-master-controller.yaml)
+[Download example](redis-master-controller.yaml?raw=true)
 
 Change to the `/examples/guestbook` directory if you're not already there. Create the redis master pod in your Kubernetes cluster by running:
@@ -227,7 +227,7 @@ spec:
 name: redis-master
 ```
 
-[Download example](redis-master-service.yaml)
+[Download example](redis-master-service.yaml?raw=true)
 
 Create the service by running:
@@ -316,7 +316,7 @@ spec:
 - containerPort: 6379
 ```
 
-[Download example](redis-slave-controller.yaml)
+[Download example](redis-slave-controller.yaml?raw=true)
 
 and create the replication controller by running:
@@ -367,7 +367,7 @@ spec:
 name: redis-slave
 ```
 
-[Download example](redis-slave-service.yaml)
+[Download example](redis-slave-service.yaml?raw=true)
 
 This time the selector for the service is `name=redis-slave`, because that identifies the pods running redis slaves. It may also be helpful to set labels on your service itself as we've done here to make it easy to locate them with the `kubectl get services -l "label=value"` command.
@@ -426,7 +426,7 @@ spec:
 - containerPort: 80
 ```
 
-[Download example](frontend-controller.yaml)
+[Download example](frontend-controller.yaml?raw=true)
 
 Using this file, you can turn up your frontend with:
@@ -539,7 +539,7 @@ spec:
 name: frontend
 ```
 
-[Download example](frontend-service.yaml)
+[Download example](frontend-service.yaml?raw=true)
 
 #### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)
diff --git a/examples/hazelcast/README.md b/examples/hazelcast/README.md
index 5ae17f5c69..269755f45f 100644
--- a/examples/hazelcast/README.md
+++ b/examples/hazelcast/README.md
@@ -83,7 +83,7 @@ spec:
 name: hazelcast
 ```
 
-[Download example](hazelcast-service.yaml)
+[Download example](hazelcast-service.yaml?raw=true)
 
 The important thing to note here is the `selector`. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is `name: hazelcast`. If you look at the Replication Controller specification below, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service.
@@ -138,7 +138,7 @@ spec:
 name: hazelcast
 ```
 
-[Download example](hazelcast-controller.yaml)
+[Download example](hazelcast-controller.yaml?raw=true)
 
 There are a few things to note in this description. First is that we are running the `quay.io/pires/hazelcast-kubernetes` image, tag `0.5`. This is a `busybox` installation with JRE 8 Update 45. However it also adds a custom [`application`](https://github.com/pires/hazelcast-kubernetes-bootstrapper) that finds any Hazelcast nodes in the cluster and bootstraps an Hazelcast instance accordingly. The `HazelcastDiscoveryController` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later).
diff --git a/examples/mysql-wordpress-pd/README.md b/examples/mysql-wordpress-pd/README.md
index 7a496eeedc..34d2fd1a3d 100644
--- a/examples/mysql-wordpress-pd/README.md
+++ b/examples/mysql-wordpress-pd/README.md
@@ -131,7 +131,7 @@ spec:
 fsType: ext4
 ```
 
-[Download example](mysql.yaml)
+[Download example](mysql.yaml?raw=true)
 
 Note that we've defined a volume mount for `/var/lib/mysql`, and specified a volume that uses the persistent disk (`mysql-disk`) that you created.
@@ -186,7 +186,7 @@ spec:
 name: mysql
 ```
 
-[Download example](mysql-service.yaml)
+[Download example](mysql-service.yaml?raw=true)
 
 Start the service like this:
@@ -241,7 +241,7 @@ spec:
 fsType: ext4
 ```
 
-[Download example](wordpress.yaml)
+[Download example](wordpress.yaml?raw=true)
 
 Create the pod:
@@ -282,7 +282,7 @@ spec:
 type: LoadBalancer
 ```
 
-[Download example](wordpress-service.yaml)
+[Download example](wordpress-service.yaml?raw=true)
 
 Note the `type: LoadBalancer` setting. This will set up the wordpress service behind an external IP.
diff --git a/examples/phabricator/README.md b/examples/phabricator/README.md
index 4aa15260ab..d9fca4bbe1 100644
--- a/examples/phabricator/README.md
+++ b/examples/phabricator/README.md
@@ -98,7 +98,7 @@ To start Phabricator server use the file [`examples/phabricator/phabricator-cont
 }
 ```
 
-[Download example](phabricator-controller.json)
+[Download example](phabricator-controller.json?raw=true)
 
 Create the phabricator pod in your Kubernetes cluster by running:
@@ -188,7 +188,7 @@ To automate this process and make sure that a proper host is authorized even if
 }
 ```
 
-[Download example](authenticator-controller.json)
+[Download example](authenticator-controller.json?raw=true)
 
 To create the pod run:
@@ -237,7 +237,7 @@ Use the file [`examples/phabricator/phabricator-service.json`](phabricator-servi
 }
 ```
 
-[Download example](phabricator-service.json)
+[Download example](phabricator-service.json?raw=true)
 
 To create the service run:
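For reference, a minimal Go sketch (outside the patch) of the link format that the updated `exampleContent` emits. It reuses the `fmt.Sprintf` pattern shown in the `example_syncer.go` hunk above; the helper name and the sample inputs are illustrative only and are not part of the munger's actual API.

```go
package main

import (
	"fmt"
	"strings"
)

// renderExample mirrors the format string changed above: the example file is
// embedded in a fenced code block, and the download link carries ?raw=true so
// GitHub serves the raw file rather than a rendered page.
// (Hypothetical helper, shown only to illustrate the change.)
func renderExample(fileType, fileContent, fileRel string) string {
	trimmed := strings.TrimSpace(fileContent)
	return fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s?raw=true)", fileType, trimmed, fileRel)
}

func main() {
	pod := "apiVersion: v1\nkind: Pod\nmetadata:\n  name: nginx\n"
	fmt.Println(renderExample("yaml", pod, "testdata/pod.yaml"))
}
```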