{
  "kind": "ReplicationController",
  "apiVersion": "v1",
  "metadata": {
    "name": "elasticsearch-logging-controller",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/replicationcontrollers/elasticsearch-logging-controller",
    "uid": "aa76f162-e8e5-11e4-8fde-42010af09327",
    "resourceVersion": "98",
    "creationTimestamp": "2015-04-22T11:49:43Z",
    "labels": {
      "kubernetes.io/cluster-service": "true",
      "name": "elasticsearch-logging"
    }
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "name": "elasticsearch-logging"
    },
    "template": {
      "metadata": {
        "creationTimestamp": null,
        "labels": {
          "kubernetes.io/cluster-service": "true",
          "name": "elasticsearch-logging"
        }
      },
      "spec": {
        "volumes": [
          {
            "name": "es-persistent-storage",
            "hostPath": null,
            "emptyDir": {
              "medium": ""
            },
            "gcePersistentDisk": null,
            "awsElasticBlockStore": null,
            "gitRepo": null,
            "secret": null,
            "nfs": null,
            "iscsi": null,
            "glusterfs": null,
            "quobyte": null
          }
        ],
        "containers": [
          {
            "name": "elasticsearch-logging",
            "image": "k8s.gcr.io/elasticsearch:1.0",
            "ports": [
              {
                "name": "db",
                "containerPort": 9200,
                "protocol": "TCP"
              },
              {
                "name": "transport",
                "containerPort": 9300,
                "protocol": "TCP"
              }
            ],
            "resources": {},
            "volumeMounts": [
              {
                "name": "es-persistent-storage",
                "mountPath": "/data"
              }
            ],
            "terminationMessagePath": "/dev/termination-log",
            "imagePullPolicy": "IfNotPresent",
            "capabilities": {}
          }
        ],
        "restartPolicy": "Always",
        "dnsPolicy": "ClusterFirst"
      }
    }
  },
  "status": {
    "replicas": 1
  }
}