mirror of https://github.com/k3s-io/k3s
commit 0b183f43fd
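The change throughout is mechanical: every `kubectl stop` becomes `kubectl delete`. A minimal sketch of the substitution, assuming a cluster with an rc named my-nginx (as in the hunks below):

```sh
# deprecated form, removed throughout this commit:
#   kubectl stop rc my-nginx
# replacement; deleting the rc also takes down its pods:
kubectl delete rc my-nginx
```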
@@ -76,7 +76,7 @@ coreos:
 ExecStartPre=/bin/bash -c 'while [[ \"\$(curl -s http://127.0.0.1:8080/healthz)\" != \"ok\" ]]; do sleep 1; done'
 ExecStartPre=/bin/sleep 10
 ExecStart=/opt/kubernetes/bin/kubectl create -f /opt/kubernetes/addons
-ExecStop=/opt/kubernetes/bin/kubectl stop -f /opt/kubernetes/addons
+ExecStop=/opt/kubernetes/bin/kubectl delete -f /opt/kubernetes/addons
 RemainAfterExit=yes

 [Install]
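In the unit above, `RemainAfterExit=yes` keeps the unit active after `ExecStart` finishes, so the `ExecStop=` command fires when the unit is later stopped and removes the addons again. A sketch of exercising it (the unit name here is hypothetical):

```sh
sudo systemctl stop kube-addons.service   # runs ExecStop: kubectl delete -f /opt/kubernetes/addons
```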
@@ -87,10 +87,10 @@ done
 grep "Exited ([^0])" output.txt
 ```

-Eventually you will have sufficient runs for your purposes. At that point you can stop and delete the replication controller by running:
+Eventually you will have sufficient runs for your purposes. At that point you can delete the replication controller by running:

 ```sh
-kubectl stop replicationcontroller flakecontroller
+kubectl delete replicationcontroller flakecontroller
 ```

 If you do a final check for flakes with `docker ps -a`, ignore tasks that exited -1, since that's what happens when you stop the replication controller.
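For that final check, a sketch that greps the stopped-controller exits out of the `docker ps -a` listing:

```sh
docker ps -a | grep "Exited (" | grep -v "Exited (-1)"
```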
@@ -699,7 +699,7 @@ List Kubernetes

 Kill all pods:

-for i in `kubectl get pods | awk '{print $1}'`; do kubectl stop pod $i; done
+for i in `kubectl get pods | awk '{print $1}'`; do kubectl delete pod $i; done


 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
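Note the loop also sweeps in the header row that `kubectl get pods` prints; `kubectl delete` accepts `--all`, which avoids both the loop and the header:

```sh
kubectl delete pods --all
```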
@@ -123,10 +123,10 @@ root 479 0.0 0.0 4348 812 ? S 00:05 0:00 sleep 1
 root 480 0.0 0.0 15572 2212 ? R 00:05 0:00 ps aux
 ```

-What happens if for any reason the image in this pod is killed off and then restarted by Kubernetes? Will we still see the log lines from the previous invocation of the container followed by the log lines for the started container? Or will we lose the log lines from the original container’s execution and only see the log lines for the new container? Let’s find out. First let’s stop the currently running counter.
+What happens if for any reason the image in this pod is killed off and then restarted by Kubernetes? Will we still see the log lines from the previous invocation of the container followed by the log lines for the started container? Or will we lose the log lines from the original container’s execution and only see the log lines for the new container? Let’s find out. First let’s delete the currently running counter.

 ```console
-$ kubectl stop pod counter
+$ kubectl delete pod counter
 pods/counter
 ```

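To carry on with the experiment after the delete, the counter can be recreated from its pod spec and its log re-checked; a sketch, with the spec filename assumed (hypothetical here):

```console
$ kubectl create -f counter-pod.yaml
$ kubectl logs counter
```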
@@ -59,10 +59,10 @@ You can also see the replication controller that was created:
 kubectl get rc
 ```

-To stop the two replicated containers, stop the replication controller:
+To stop the two replicated containers, delete the replication controller:

 ```bash
-kubectl stop rc my-nginx
+kubectl delete rc my-nginx
 ```

 ### Exposing your pods to the internet.
@@ -622,10 +622,10 @@ For Google Compute Engine details about limiting traffic to specific sources, se

 ### Step Seven: Cleanup

-If you are in a live kubernetes cluster, you can just kill the pods by stopping the replication controllers and deleting the services. Using labels to select the resources to stop or delete is an easy way to do this in one command.
+If you are in a live kubernetes cluster, you can just kill the pods by deleting the replication controllers and the services. Using labels to select the resources to stop or delete is an easy way to do this in one command.

 ```console
-kubectl stop rc -l "name in (redis-master, redis-slave, frontend)"
+kubectl delete rc -l "name in (redis-master, redis-slave, frontend)"
 kubectl delete service -l "name in (redis-master, redis-slave, frontend)"
 ```

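The `-l` selector uses the set-based `in` syntax, which is what lets one command cover all three names; the same selector works with `get` to preview what the cleanup will remove:

```sh
kubectl get rc,services -l "name in (redis-master, redis-slave, frontend)"
```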
@@ -15,6 +15,6 @@
 # limitations under the License.

 echo "Deleting Phabricator service" && kubectl delete -f phabricator-service.json
-echo "Deleting Phabricator replication controller" && kubectl stop rc phabricator-controller
+echo "Deleting Phabricator replication controller" && kubectl delete rc phabricator-controller
 echo "Delete firewall rule" && gcloud compute firewall-rules delete -q phabricator-node-80

@@ -27,8 +27,8 @@ cells=`echo $CELLS | tr ',' ' '`

 # Delete replication controllers
 for cell in 'global' $cells; do
-echo "Stopping etcd replicationcontroller for $cell cell..."
-$KUBECTL stop replicationcontroller etcd-$cell
+echo "Deleting etcd replicationcontroller for $cell cell..."
+$KUBECTL delete replicationcontroller etcd-$cell

 echo "Deleting etcd service for $cell cell..."
 $KUBECTL delete service etcd-$cell
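The `tr` in the header line turns the comma-separated `$CELLS` into a shell word list, so the loop always covers the global cell plus each named cell; a quick sketch of the expansion:

```sh
CELLS=cell1,cell2
cells=`echo $CELLS | tr ',' ' '`
for cell in 'global' $cells; do echo etcd-$cell; done   # prints etcd-global, etcd-cell1, etcd-cell2
```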
@@ -21,8 +21,8 @@ set -e
 script_root=`dirname "${BASH_SOURCE}"`
 source $script_root/env.sh

-echo "Stopping guestbook replicationcontroller..."
-$KUBECTL stop replicationcontroller guestbook
+echo "Deleting guestbook replicationcontroller..."
+$KUBECTL delete replicationcontroller guestbook

 echo "Deleting guestbook service..."
 $KUBECTL delete service guestbook
@@ -21,8 +21,8 @@ set -e
 script_root=`dirname "${BASH_SOURCE}"`
 source $script_root/env.sh

-echo "Stopping vtctld replicationcontroller..."
-$KUBECTL stop replicationcontroller vtctld
+echo "Deleting vtctld replicationcontroller..."
+$KUBECTL delete replicationcontroller vtctld

 echo "Deleting vtctld service..."
 $KUBECTL delete service vtctld
@@ -21,8 +21,8 @@ set -e
 script_root=`dirname "${BASH_SOURCE}"`
 source $script_root/env.sh

-echo "Stopping vtgate replicationcontroller..."
-$KUBECTL stop replicationcontroller vtgate
+echo "Deleting vtgate replicationcontroller..."
+$KUBECTL delete replicationcontroller vtgate

 echo "Deleting vtgate service..."
 $KUBECTL delete service vtgate
@@ -385,7 +385,7 @@ runTests() {
 # Pre-condition: valid-pod and redis-proxy PODs are running
 kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
 # Command
-kubectl stop pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # stop multiple pods at once
+kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # delete multiple pods at once
 # Post-condition: no POD is running
 kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

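Pods default to a 30-second termination grace period, so the test passes `--grace-period=0` to make the delete immediate rather than wait out graceful shutdown; the flag works the same outside the harness:

```sh
kubectl delete pod valid-pod --grace-period=0
```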
@@ -720,7 +720,7 @@ __EOF__
 kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
 # Command
 kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
-kubectl stop rc frontend "${kube_flags[@]}"
+kubectl delete rc frontend "${kube_flags[@]}"
 # Post-condition: no pods from frontend controller
 kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''

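The post-condition checks by label rather than by controller name, since deleting the rc is expected to take its pods with it; the same selector can be run by hand:

```sh
kubectl get pods -l "name=frontend"
```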
@@ -841,7 +841,7 @@ __EOF__
 # Pre-condition: frontend replication controller is running
 kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
 # Command
-kubectl stop rc frontend "${kube_flags[@]}"
+kubectl delete rc frontend "${kube_flags[@]}"
 # Post-condition: no replication controller is running
 kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''

@@ -858,7 +858,7 @@ __EOF__
 # Pre-condition: frontend and redis-slave
 kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
 # Command
-kubectl stop rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
+kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
 # Post-condition: no replication controller is running
 kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''

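`kube::test::get_object_assert` compares templated `kubectl get` output against an expected string. Assuming `$id_field` is `.metadata.name` (set elsewhere in this script, not shown in the diff), the rc assertion corresponds roughly to:

```sh
kubectl get rc -o template --template="{{range.items}}{{.metadata.name}}:{{end}}"
```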
@@ -24,4 +24,4 @@ counter5000:
 kubectl scale rc counter --replicas=5000

 stop:
-kubectl stop rc counter
+kubectl delete rc counter
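The Makefile target keeps its `stop` name; only the command underneath changes, so the workflow stays the same:

```sh
make stop   # now runs: kubectl delete rc counter
```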
@@ -15,7 +15,7 @@ rc:
 kubectl create --validate -f cauldron-rc.yaml

 stop:
-kubectl stop rc cauldron
+kubectl delete rc cauldron

 get:
 kubectl get rc,pods -l app=cauldron