Fix the service printer to be a single line per service
parent 68bc931c65
commit 79fb674679
@@ -67,8 +67,8 @@ kubectl expose rc nginx --port=80
 This should print:
 
 ```console
-NAME LABELS SELECTOR IP PORT(S)
-nginx <none> run=nginx <ip-addr> 80/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+nginx 10.179.240.1 <none> 80/TCP run=nginx 8d
 ```
 
 Hit the webserver:
@@ -151,11 +151,11 @@ kubectl expose rc nginx --port=80
 This should print:
 
 ```console
-NAME LABELS SELECTOR IP PORT(S)
-nginx run=nginx run=nginx <ip-addr> 80/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+nginx 10.0.93.211 <none> 80/TCP run=nginx 1h
 ```
 
-If ip-addr is blank, run the following command to obtain it. Known issue #10836
+If `CLUSTER_IP` is blank, run the following command to obtain it. Known issue #10836
 
 ```sh
 kubectl get svc nginx
@@ -145,15 +145,11 @@ $ kubectl get --all-namespaces services
 should show a set of [services](../user-guide/services.md) that look something like this:
 
 ```console
-NAMESPACE NAME LABELS SELECTOR IP(S) PORT(S)
-default kubernetes component=apiserver,provider=kubernetes <none> 10.0.0.1 443/TCP
-kube-system kube-dns k8s-app=kube-dns,kubernetes.io/cluster-service=true,kubernetes.io/name=KubeDNS k8s-app=kube-dns 10.0.0.10 53/UDP
-53/TCP
-kube-system kube-ui k8s-app=kube-ui,kubernetes.io/cluster-service=true,kubernetes.io/name=KubeUI k8s-app=kube-ui 10.0.59.25 80/TCP
-kube-system monitoring-grafana kubernetes.io/cluster-service=true,kubernetes.io/name=Grafana k8s-app=influxGrafana 10.0.41.246 80/TCP
-kube-system monitoring-heapster kubernetes.io/cluster-service=true,kubernetes.io/name=Heapster k8s-app=heapster 10.0.59.48 80/TCP
-kube-system monitoring-influxdb kubernetes.io/cluster-service=true,kubernetes.io/name=InfluxDB k8s-app=influxGrafana 10.0.210.156 8083/TCP
-8086/TCP
+NAMESPACE NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+default kubernetes 10.0.0.1 <none> 443/TCP <none> 1d
+kube-system kube-dns 10.0.0.2 <none> 53/TCP,53/UDP k8s-app=kube-dns 1d
+kube-system kube-ui 10.0.0.3 <none> 80/TCP k8s-app=kube-ui 1d
+... 8086/TCP
 ```
 
 Similarly, you can take a look at the set of [pods](../user-guide/pods.md) that were created during cluster startup.
@@ -227,7 +227,7 @@ $ ./cluster/kubectl.sh get pods
 NAME READY STATUS RESTARTS AGE
 
 $ ./cluster/kubectl.sh get services
-NAME LABELS SELECTOR IP(S) PORT(S)
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
 
 $ ./cluster/kubectl.sh get replicationcontrollers
 CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
@@ -282,11 +282,8 @@ my-nginx-gr3hh 1/1 Running 0 1m
 my-nginx-xql4j 1/1 Running 0 1m
 
 $ ./cluster/kubectl.sh get services
-NAME LABELS SELECTOR IP(S) PORT(S)
-
-$ ./cluster/kubectl.sh get replicationcontrollers
-CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
-my-nginx my-nginx nginx run=my-nginx 3
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+my-nginx 10.0.0.1 <none> 80/TCP run=my-nginx 1h
 ```
 
 We did not start any services, hence there are none listed. But we see three replicas displayed properly.
@@ -133,7 +133,7 @@ Check your Service:
 
 ```console
 $ kubectl get svc
-NAME CLUSTER IP EXTERNAL IP PORT(S) SELECTOR AGE
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
 kubernetes 10.179.240.1 <none> 443/TCP <none> 8d
 nginxsvc 10.179.252.126 122.222.183.144 80/TCP,81/TCP,82/TCP run=nginx2 11m
 ```
@@ -196,7 +196,7 @@ Kubernetes offers a DNS cluster addon Service that uses skydns to automatically
 
 ```console
 $ kubectl get services kube-dns --namespace=kube-system
-NAME CLUSTER IP EXTERNAL IP PORT(S) SELECTOR AGE
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
 kube-dns 10.179.240.10 <none> 53/UDP,53/TCP k8s-app=kube-dns 8d
 ```
 
@@ -413,7 +413,7 @@ Lets now recreate the Service to use a cloud load balancer, just change the `Type`
 $ kubectl delete rc, svc -l app=nginx
 $ kubectl create -f ./nginx-app.yaml
 $ kubectl get svc nginxsvc
-NAME CLUSTER IP EXTERNAL IP PORT(S) SELECTOR AGE
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
 nginxsvc 10.179.252.126 162.222.184.144 80/TCP,81/TCP,82/TCP run=nginx2 13m
 
 $ curl https://162.22.184.144 -k
@@ -421,7 +421,8 @@ $ curl https://162.22.184.144 -k
 <title>Welcome to nginx!</title>
 ```
 
-You can generally tell the external IP of the service, since it will be the one that doesn't start with a `10.*`
+The IP address in the `EXTERNAL_IP` column is the one that is available on the public internet. The `CLUSTER_IP` is only available inside your
+cluster/private cloud network.
 
 ## What's next?
 
@@ -209,16 +209,16 @@ walk-through - you can use your own `Service`'s details here.
 
 ```console
 $ kubectl expose rc hostnames --port=80 --target-port=9376
-NAME LABELS SELECTOR IP(S) PORT(S)
-hostnames app=hostnames app=hostnames 80/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+hostnames 10.0.0.1 <none> 80/TCP run=hostnames 1h
 ```
 
 And read it back, just to be sure:
 
 ```console
 $ kubectl get svc hostnames
-NAME LABELS SELECTOR IP(S) PORT(S)
-hostnames app=hostnames app=hostnames 10.0.1.175 80/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+hostnames 10.0.0.1 <none> 80/TCP run=hostnames 1h
 ```
 
 As before, this is the same as if you had started the `Service` with YAML:
@@ -74,8 +74,6 @@ CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
 nginx-app nginx-app nginx run=nginx-app 1
 # expose a port through with a service
 $ kubectl expose rc nginx-app --port=80 --name=nginx-http
-NAME LABELS SELECTOR IP(S) PORT(S)
-nginx-http run=nginx-app run=nginx-app 80/TCP
 ```
 
 With kubectl, we create a [replication controller](replication-controller.md) which will make sure that N pods are running nginx (where N is the number of replicas stated in the spec, which defaults to 1). We also create a [service](services.md) with a selector that matches the replication controller's selector. See the [Quick start](quick-start.md) for more information.
@@ -107,9 +107,9 @@ mypod 1/1 Running 0 1h
 
 $ kubectl create -f docs/user-guide/persistent-volumes/simpletest/service.json
 $ kubectl get services
-NAME LABELS SELECTOR IP(S) PORT(S)
-frontendservice <none> name=frontendhttp 10.0.0.241 3000/TCP
-kubernetes component=apiserver,provider=kubernetes <none> 10.0.0.2 443/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+frontendservice 10.0.0.241 <none> 3000/TCP name=frontendhttp 1d
+kubernetes 10.0.0.2 <none> 443/TCP <none> 2d
 ```
 
 ## Next steps
@@ -76,17 +76,20 @@ Through integration with some cloud providers (for example Google Compute Engine
 
 ```console
 $ kubectl expose rc my-nginx --port=80 --type=LoadBalancer
-NAME LABELS SELECTOR IP(S) PORT(S)
-my-nginx run=my-nginx run=my-nginx 80/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+my-nginx 10.179.240.1 <none> 80/TCP run=nginx 8d
 ```
 
 To find the public IP address assigned to your application, execute:
 
 ```console
-$ kubectl get svc my-nginx -o json | grep \"ip\"
-"ip": "130.111.122.213"
+$ kubectl get svc my-nginx
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+my-nginx 10.179.240.1 25.1.2.3 80/TCP run=nginx 8d
 ```
 
+You may need to wait for a minute or two for the external ip address to be provisioned.
+
 In order to access your nginx landing page, you also have to make sure that traffic from external IPs is allowed. Do this by opening a [firewall to allow traffic on port 80](services-firewalls.md).
 
 ## Killing the application
@@ -113,8 +113,8 @@ Once that's up you can list the service in the cluster:
 
 ```sh
 $ kubectl get service dns-backend
-NAME LABELS SELECTOR IP(S) PORT(S)
-dns-backend <none> name=dns-backend 10.0.236.129 8000/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+dns-backend 10.0.2.3 <none> 8000/TCP name=dns-backend 1d
 ```
 
 Again, repeat the same process for prod namespace:
@@ -123,8 +123,8 @@ Again, repeat the same process for prod namespace:
 $ kubectl config use-context prod
 $ kubectl create -f examples/cluster-dns/dns-backend-service.yaml
 $ kubectl get service dns-backend
-NAME LABELS SELECTOR IP(S) PORT(S)
-dns-backend <none> name=dns-backend 10.0.35.246 8000/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+dns-backend 10.0.2.4 <none> 8000/TCP name=dns-backend 1d
 ```
 
 ### Step Four: Create client pod in one namespace
@@ -117,8 +117,8 @@ Services find the containers to load balance based on pod labels. The pod that y
 
 ```console
 $ kubectl get services
-NAME LABELS SELECTOR IP(S) PORT(S)
-redis-master app=redis,role=master app=redis,role=master 10.0.136.3 6379/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
 ...
 ```
 
@@ -183,9 +183,9 @@ Just like the master, we want to have a service to proxy connections to the read
 
 ```console
 $ kubectl get services
-NAME LABELS SELECTOR IP(S) PORT(S)
-redis-master app=redis,role=master app=redis,role=master 10.0.136.3 6379/TCP
-redis-slave app=redis,role=slave app=redis,role=slave 10.0.21.92 6379/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
+redis-slave 10.0.21.92 <none> 6379/TCP app-redis,role=slave 1h
 ...
 ```
 
@@ -246,11 +246,10 @@ Just like the others, we create a service to group the guestbook pods but this t
 
 ```
 $ kubectl get services
-NAME LABELS SELECTOR IP(S) PORT(S)
-guestbook app=guestbook app=guestbook 10.0.217.218 3000/TCP
-146.148.81.8
-redis-master app=redis,role=master app=redis,role=master 10.0.136.3 6379/TCP
-redis-slave app=redis,role=slave app=redis,role=slave 10.0.21.92 6379/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+guestbook 10.0.217.218 146.148.81.8 3000/TCP app=guestbook 1h
+redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
+redis-slave 10.0.21.92 <none> 6379/TCP app-redis,role=slave 1h
 ...
 ```
 
@@ -235,8 +235,9 @@ Then check the list of services, which should include the redis-master:
 
 ```console
 $ kubectl get services
-NAME LABELS SELECTOR IP PORT
-redis-master name=redis-master name=redis-master 10.0.246.242 6379
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
+...
 ```
 
 This will cause all pods to see the redis master apparently running on <ip>:6379. A service can map an incoming port to any `targetPort` in the backend pod. Once created, the service proxy on each node is configured to set up a proxy on the specified port (in this case port 6379).
@@ -358,9 +359,9 @@ $ kubectl create -f examples/guestbook/redis-slave-service.yaml
 services/redis-slave
 
 $ kubectl get services
-NAME LABELS SELECTOR IP PORT
-redis-master name=redis-master name=redis-master 10.0.246.242 6379
-redis-slave name=redis-slave name=redis-slave 10.0.72.62 6379
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
+redis-slave 10.0.21.92 <none> 6379/TCP app-redis,role=slave 1h
 ```
 
 ### Step Five: Create the frontend replicated pods
@@ -525,10 +526,10 @@ Then, list all your services again:
 
 ```console
 $ kubectl get services
-NAME LABELS SELECTOR IP PORT(S)
-frontend name=frontend name=frontend 10.0.93.211 80/TCP
-redis-master name=redis-master name=redis-master 10.0.246.242 6379/TCP
-redis-slave name=redis-slave name=redis-slave 10.0.72.62 6379/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+frontend 10.0.93.211 <none> 80/TCP name=frontend 1h
+redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
+redis-slave 10.0.21.92 <none> 6379/TCP app-redis,role=slave 1h
 ```
 
 
@@ -544,11 +545,10 @@ If the `LoadBalancer` specification is used, it can take a short period for an e
 
 ```console
 $ kubectl get services
-NAME LABELS SELECTOR IP PORT(S)
-frontend name=frontend name=frontend 10.0.93.211 80/TCP
-130.211.135.84
-redis-master name=redis-master name=redis-master 10.0.246.242 6379/TCP
-redis-slave name=redis-slave name=redis-slave 10.0.72.62 6379/TCP
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+frontend 10.0.93.211 130.211.188.51 80/TCP name=frontend 1h
+redis-master 10.0.136.3 <none> 6379/TCP app=redis,role=master 1h
+redis-slave 10.0.21.92 <none> 6379/TCP app-redis,role=slave 1h
 ```
 
 Once you've exposed the service to an external IP, visit the IP to see your guestbook in action. E.g., `http://130.211.188.51:80` in the example above.
@@ -302,8 +302,10 @@ $ kubectl get services
 
 Then, find the external IP for your WordPress service by running:
 
-```
-$ kubectl get services/wpfrontend --template="{{range .status.loadBalancer.ingress}} {{.ip}} {{end}}"
+```console
+$ kubectl get services/wpfrontend
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+wpfrontend 10.0.0.2 1.2.3.4 80/TCP ... ...
 ```
 
 or by listing the forwarding rules for your project:
@@ -247,10 +247,12 @@ $ kubectl create -f examples/phabricator/phabricator-service.json
 phabricator
 ```
 
-To play with the service itself, find the external IP of the load balancer:
+To play with the service itself, find the `EXTERNAL_IP` of the load balancer:
 
-```sh
-$ kubectl get services phabricator -o template --template='{{(index .status.loadBalancer.ingress 0).ip}}{{"\n"}}'
+```console
+$ kubectl get services phabricator
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+phabricator 10.0.0.2 1.2.3.4 8080/TCP ... ...
 ```
 
 and then visit port 80 of that IP address.
@@ -56,9 +56,9 @@ check out:
 
 ```sh
 $kubectl get services
-NAME LABELS SELECTOR IP(S) PORT(S)
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+rethinkdb-driver 10.0.27.114 <none> 28015/TCP db=rethinkdb 10m
 [...]
-rethinkdb-driver db=influxdb db=rethinkdb 10.0.27.114 28015/TCP
 ```
 
 **Step 2**
@@ -115,13 +115,12 @@ kubectl create -f examples/rethinkdb/admin-service.yaml
 
 find the service
 
-```sh
+```console
 $kubectl get services
-NAME LABELS SELECTOR IP(S) PORT(S)
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
 [...]
-rethinkdb-admin db=influxdb db=rethinkdb,role=admin 10.0.131.19 8080/TCP
-104.197.19.120
-rethinkdb-driver db=influxdb db=rethinkdb 10.0.27.114 28015/TCP
+rethinkdb-admin 10.0.131.19 104.197.19.120 8080/TCP db=rethinkdb,role=admin 10m
+rethinkdb-driver 10.0.27.114 <none> 28015/TCP db=rethinkdb 20m
 ```
 
 We request an external load balancer in the [admin-service.yaml](admin-service.yaml) file:
@@ -91,11 +91,11 @@ zookeeper 1/1 Running 0 43s
 
 ### Check to see if ZooKeeper is accessible
 
-```sh
+```console
 $ kubectl get services
-NAME LABELS SELECTOR IP(S) PORT(S)
-kubernetes component=apiserver,provider=kubernetes <none> 10.254.0.2 443
-zookeeper name=zookeeper name=zookeeper 10.254.139.141 2181
+NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
+zookeeper 10.254.139.141 <none> 2181/TCP name=zookeeper 10m
+kubernetes 10.0.0.2 <none> 443/TCP <none> 1d
 
 $ echo ruok | nc 10.254.139.141 2181; echo
 imok
@@ -391,11 +391,9 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 		fmt.Printf("Unexpected error: %v", err)
 	}
 	// Output:
-	// |NAMESPACE NAME LABELS SELECTOR IP(S) PORT(S) AGE L1|
-	// |ns1 svc1 l1=value s=magic 10.1.1.1 53/UDP 10y value|
-	// | 53/TCP |
-	// |ns2 svc2 l1=dolla-bill-yall s=kazam 10.1.1.2 80/TCP 10y dolla-bill-yall|
-	// | 8080/TCP |
+	// |NAMESPACE NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE L1|
+	// |ns1 svc1 10.1.1.1 unknown 53/UDP,53/TCP s=magic 10y value|
+	// |ns2 svc2 10.1.1.2 unknown 80/TCP,8080/TCP s=kazam 10y dolla-bill-yall|
 	// ||
 }
 
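For readers unfamiliar with Go example tests: `go test` runs each `Example*` function and compares its standard output (trimmed of leading and trailing whitespace) against the text of the trailing `// Output:` comment, which is why the expected table above has to track the printer's layout exactly. A minimal, hypothetical illustration of the mechanism, not taken from the kubectl test suite:

```go
package printer_test

import "fmt"

// ExampleSingleLinePerService is a made-up example test: `go test` executes it
// and diffs the printed text against the "// Output:" comment below, so the
// comment must mirror the printed rows exactly.
func ExampleSingleLinePerService() {
	fmt.Println("svc1 10.1.1.1 unknown 53/UDP,53/TCP s=magic 10y")
	fmt.Println("svc2 10.1.1.2 unknown 80/TCP,8080/TCP s=kazam 10y")
	// Output:
	// svc1 10.1.1.1 unknown 53/UDP,53/TCP s=magic 10y
	// svc2 10.1.1.2 unknown 80/TCP,8080/TCP s=kazam 10y
}
```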
@@ -259,7 +259,7 @@ func (h *HumanReadablePrinter) HandledResources() []string {
 var podColumns = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"}
 var podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"}
 var replicationControllerColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "REPLICAS", "AGE"}
-var serviceColumns = []string{"NAME", "LABELS", "SELECTOR", "IP(S)", "PORT(S)", "AGE"}
+var serviceColumns = []string{"NAME", "CLUSTER_IP", "EXTERNAL_IP", "PORT(S)", "SELECTOR", "AGE"}
 var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
 var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
 var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
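For context on how these headers and the rows written by `printService` become the aligned tables seen in the docs hunks above: the human-readable printer emits one tab-separated line per object into a tabwriter-style writer, which pads the cells into columns. A rough, standalone sketch of that idea (the writer parameters and sample rows here are illustrative, not the actual kubectl wiring):

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// minwidth=10, tabwidth=4, padding=3, pad with spaces.
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	fmt.Fprintln(w, "NAME\tCLUSTER_IP\tEXTERNAL_IP\tPORT(S)\tSELECTOR\tAGE")
	fmt.Fprintln(w, "nginx\t10.179.240.1\t<none>\t80/TCP\trun=nginx\t8d")
	fmt.Fprintln(w, "kube-dns\t10.0.0.10\t<none>\t53/UDP,53/TCP\tk8s-app=kube-dns\t1d")
	w.Flush() // columns are aligned when the buffered rows are flushed
}
```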
@@ -557,29 +557,52 @@ func printReplicationControllerList(list *api.ReplicationControllerList, w io.Wr
 	return nil
 }
 
+func getServiceExternalIP(svc *api.Service) string {
+	switch svc.Spec.Type {
+	case api.ServiceTypeClusterIP:
+		return "<none>"
+	case api.ServiceTypeNodePort:
+		return "nodes"
+	case api.ServiceTypeLoadBalancer:
+		ingress := svc.Status.LoadBalancer.Ingress
+		result := []string{}
+		for i := range ingress {
+			if ingress[i].IP != "" {
+				result = append(result, ingress[i].IP)
+			}
+		}
+		return strings.Join(result, ",")
+	}
+	return "unknown"
+}
+
+func makePortString(ports []api.ServicePort) string {
+	pieces := make([]string, len(ports))
+	for ix := range ports {
+		port := &ports[ix]
+		pieces[ix] = fmt.Sprintf("%d/%s", port.Port, port.Protocol)
+	}
+	return strings.Join(pieces, ",")
+}
+
 func printService(svc *api.Service, w io.Writer, withNamespace bool, wide bool, columnLabels []string) error {
 	name := svc.Name
 	namespace := svc.Namespace
 
-	ips := []string{svc.Spec.ClusterIP}
-
-	ingress := svc.Status.LoadBalancer.Ingress
-	for i := range ingress {
-		if ingress[i].IP != "" {
-			ips = append(ips, ingress[i].IP)
-		}
-	}
+	internalIP := svc.Spec.ClusterIP
+	externalIP := getServiceExternalIP(svc)
 
 	if withNamespace {
 		if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil {
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d/%s\t%s",
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s",
 		name,
-		formatLabels(svc.Labels),
+		internalIP,
+		externalIP,
+		makePortString(svc.Spec.Ports),
 		formatLabels(svc.Spec.Selector),
-		ips[0], svc.Spec.Ports[0].Port, svc.Spec.Ports[0].Protocol,
 		translateTimestamp(svc.CreationTimestamp),
 	); err != nil {
 		return err
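A self-contained sketch that mirrors the logic of the two helpers added above on simplified stand-in types (the real code operates on `api.Service`), showing how a multi-port LoadBalancer service now collapses into a single tab-separated row instead of spilling extra ports and IPs onto continuation lines:

```go
package main

import (
	"fmt"
	"strings"
)

// port is a stand-in for api.ServicePort, used only for this sketch.
type port struct {
	Port     int
	Protocol string
}

// makePortString joins every port as "port/protocol", comma separated,
// so all ports fit in one PORT(S) cell.
func makePortString(ports []port) string {
	pieces := make([]string, len(ports))
	for i, p := range ports {
		pieces[i] = fmt.Sprintf("%d/%s", p.Port, p.Protocol)
	}
	return strings.Join(pieces, ",")
}

// externalIP picks a single EXTERNAL_IP cell based on the service type,
// analogous to getServiceExternalIP above.
func externalIP(svcType string, ingressIPs []string) string {
	switch svcType {
	case "ClusterIP":
		return "<none>"
	case "NodePort":
		return "nodes"
	case "LoadBalancer":
		return strings.Join(ingressIPs, ",")
	}
	return "unknown"
}

func main() {
	ports := []port{{53, "UDP"}, {53, "TCP"}}
	// One tab-separated line per service: NAME, CLUSTER_IP, EXTERNAL_IP, PORT(S), SELECTOR, AGE.
	fmt.Printf("kube-dns\t10.0.0.10\t%s\t%s\tk8s-app=kube-dns\t1d\n",
		externalIP("LoadBalancer", []string{"1.2.3.4"}),
		makePortString(ports))
}
```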
@@ -587,33 +610,6 @@ func printService(svc *api.Service, w io.Writer, withNamespace bool, wide bool,
 	if _, err := fmt.Fprint(w, appendLabels(svc.Labels, columnLabels)); err != nil {
 		return err
 	}
-
-	extraLinePrefix := "\t\t\t"
-	if withNamespace {
-		extraLinePrefix = "\t\t\t\t"
-	}
-	count := len(svc.Spec.Ports)
-	if len(ips) > count {
-		count = len(ips)
-	}
-	for i := 1; i < count; i++ {
-		ip := ""
-		if len(ips) > i {
-			ip = ips[i]
-		}
-		port := ""
-		if len(svc.Spec.Ports) > i {
-			port = fmt.Sprintf("%d/%s", svc.Spec.Ports[i].Port, svc.Spec.Ports[i].Protocol)
-		}
-		// Lay out additional ports.
-		if _, err := fmt.Fprintf(w, "%s%s\t%s", extraLinePrefix, ip, port); err != nil {
-			return err
-		}
-		if _, err := fmt.Fprint(w, appendLabelTabs(columnLabels)); err != nil {
-			return err
-		}
-	}
-
 	return nil
 }
 
@@ -984,7 +980,7 @@ func appendLabels(itemLabels map[string]string, columnLabels []string) string {
 		if il, ok := itemLabels[cl]; ok {
 			buffer.WriteString(fmt.Sprint(il))
 		} else {
-			buffer.WriteString("<n/a>")
+			buffer.WriteString("<none>")
 		}
 	}
 	buffer.WriteString("\n")
@@ -632,6 +632,7 @@ func TestPrintHumanReadableService(t *testing.T) {
 		{
 			Spec: api.ServiceSpec{
+				ClusterIP: "1.2.3.4",
 				Type: "LoadBalancer",
 				Ports: []api.ServicePort{
 					{
 						Port: 80,
@@ -674,6 +675,7 @@ func TestPrintHumanReadableService(t *testing.T) {
 		{
 			Spec: api.ServiceSpec{
+				ClusterIP: "1.2.3.4",
 				Type: "LoadBalancer",
 				Ports: []api.ServicePort{
 					{
 						Port: 80,
@@ -702,6 +704,7 @@ func TestPrintHumanReadableService(t *testing.T) {
 		{
 			Spec: api.ServiceSpec{
+				ClusterIP: "1.2.3.4",
 				Type: "LoadBalancer",
 				Ports: []api.ServicePort{
 					{
 						Port: 80,
@@ -758,13 +761,9 @@ func TestPrintHumanReadableService(t *testing.T) {
 				t.Errorf("expected to contain port: %s, but doesn't: %s", portSpec, output)
 			}
 		}
-		// Max of # ports and (# public ip + cluster ip)
-		count := len(svc.Spec.Ports)
-		if len(svc.Status.LoadBalancer.Ingress)+1 > count {
-			count = len(svc.Status.LoadBalancer.Ingress) + 1
-		}
-		if count != strings.Count(output, "\n") {
-			t.Errorf("expected %d newlines, found %d", count, strings.Count(output, "\n"))
+		// Each service should print on one line
+		if 1 != strings.Count(output, "\n") {
+			t.Errorf("expected a single newline, found %d", strings.Count(output, "\n"))
 		}
 	}
 }