#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This command checks that the built commands can function together for
# simple scenarios. It does not require Docker so it can run in Travis.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/test.sh"

# Stops the running kubectl proxy, if there is one.
function stop-proxy()
{
  [[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null
  PROXY_PID=
}
# Starts "kubect proxy" to test the client proxy. You may pass options, e.g.
|
|
# --api-prefix.
|
|
function start-proxy()
|
|
{
|
|
stop-proxy
|
|
|
|
kube::log::status "Starting kubectl proxy"
|
|
# the --www and --www-prefix are just to make something definitely show up for
|
|
# wait_for_url to see.
|
|
kubectl proxy -p ${PROXY_PORT} --www=. --www-prefix=/healthz "$@" 1>&2 &
|
|
PROXY_PID=$!
|
|
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy $@"
|
|
}

function cleanup()
{
  [[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null
  [[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null
  [[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null
  stop-proxy

  kube::etcd::cleanup
  rm -rf "${KUBE_TEMP}"

  kube::log::status "Clean up complete"
}

# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
function check-curl-proxy-code()
{
  local status
  local -r address=$1
  local -r desired=$2
  local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}"
  status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}")
  if [ "${status}" == "${desired}" ]; then
    return 0
  fi
  echo "For address ${full_address}, got ${status} but wanted ${desired}"
  return 1
}
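
# Illustrative usage of the two helpers above (a sketch, not part of the test
# flow; this exact sequence appears in the proxy tests further down):
#   start-proxy --api-prefix=/custom
#   check-curl-proxy-code /custom/metrics 200
#   stop-proxy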

trap cleanup EXIT SIGINT

kube::util::ensure-temp-dir
kube::etcd::start

ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-4001}
API_PORT=${API_PORT:-8080}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_PORT=${KUBELET_PORT:-10250}
KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_PORT=${PROXY_PORT:-8001}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.

# Ensure ~/.kube/config isn't loaded by tests
HOME="${KUBE_TEMP}"

# Check kubectl
kube::log::status "Running kubectl with no options"
"${KUBE_OUTPUT_HOSTBIN}/kubectl"

kube::log::status "Starting kubelet in masterless mode"
"${KUBE_OUTPUT_HOSTBIN}/kubelet" \
  --really-crash-for-testing=true \
  --root-dir=/tmp/kubelet.$$ \
  --cert-dir="${TMPDIR:-/tmp/}" \
  --docker-endpoint="fake://" \
  --hostname-override="127.0.0.1" \
  --address="127.0.0.1" \
  --port="$KUBELET_PORT" \
  --healthz-port="${KUBELET_HEALTHZ_PORT}" 1>&2 &
KUBELET_PID=$!
kube::util::wait_for_url "http://127.0.0.1:${KUBELET_HEALTHZ_PORT}/healthz" "kubelet(masterless)"
kill ${KUBELET_PID} 1>&2 2>/dev/null
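# The masterless run above only verifies that the kubelet comes up healthy
# without an API server; it is killed as soon as /healthz responds.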

kube::log::status "Starting kubelet in masterful mode"
"${KUBE_OUTPUT_HOSTBIN}/kubelet" \
  --really-crash-for-testing=true \
  --root-dir=/tmp/kubelet.$$ \
  --cert-dir="${TMPDIR:-/tmp/}" \
  --docker-endpoint="fake://" \
  --hostname-override="127.0.0.1" \
  --address="127.0.0.1" \
  --api-servers="${API_HOST}:${API_PORT}" \
  --port="$KUBELET_PORT" \
  --healthz-port="${KUBELET_HEALTHZ_PORT}" 1>&2 &
KUBELET_PID=$!

kube::util::wait_for_url "http://127.0.0.1:${KUBELET_HEALTHZ_PORT}/healthz" "kubelet"

# Start kube-apiserver
kube::log::status "Starting kube-apiserver"
KUBE_API_VERSIONS="v1" "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
  --address="127.0.0.1" \
  --public-address-override="127.0.0.1" \
  --port="${API_PORT}" \
  --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
  --kubelet-port=${KUBELET_PORT} \
  --runtime-config=api/v1 \
  --cert-dir="${TMPDIR:-/tmp/}" \
  --service-cluster-ip-range="10.0.0.0/24" 1>&2 &
APISERVER_PID=$!

kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver"
# Start controller manager
kube::log::status "Starting controller-manager"
"${KUBE_OUTPUT_HOSTBIN}/kube-controller-manager" \
  --port="${CTLRMGR_PORT}" \
  --master="127.0.0.1:${API_PORT}" 1>&2 &
CTLRMGR_PID=$!

kube::util::wait_for_url "http://127.0.0.1:${CTLRMGR_PORT}/healthz" "controller-manager"
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/api/v1/nodes/127.0.0.1" "apiserver(nodes)"

# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH

runTests() {
  version="$1"
  echo "Testing api version: $1"
  if [[ -z "${version}" ]]; then
    kube_flags=(
      -s "http://127.0.0.1:${API_PORT}"
      --match-server-version
    )
    [ "$(kubectl get nodes -t '{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
  else
    kube_flags=(
      -s "http://127.0.0.1:${API_PORT}"
      --match-server-version
      --api-version="${version}"
    )
    [ "$(kubectl get nodes -t '{{ .apiVersion }}' "${kube_flags[@]}")" == "${version}" ]
  fi
  id_field=".metadata.name"
  labels_field=".metadata.labels"
  service_selector_field=".spec.selector"
  rc_replicas_field=".spec.replicas"
  rc_status_replicas_field=".status.replicas"
  rc_container_image_field=".spec.template.spec.containers"
  port_field="(index .spec.ports 0).port"
  port_name="(index .spec.ports 0).name"
  image_field="(index .spec.containers 0).image"
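
  # These fields are Go-template snippets consumed by the assertion helpers
  # below; e.g. the recurring "no pods exist" pre-condition renders every
  # item's name and expects an empty result:
  #   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''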

  # Passing no arguments to create is an error
  ! kubectl create

  #######################
  # kubectl local proxy #
  #######################

  # Make sure the UI can be proxied
  start-proxy --api-prefix=/
  check-curl-proxy-code /ui 301
  check-curl-proxy-code /metrics 200
  if [[ -n "${version}" ]]; then
    check-curl-proxy-code /api/${version}/namespaces 200
  fi
  stop-proxy

  # Default proxy locks you into the /api path (legacy behavior)
  start-proxy
  check-curl-proxy-code /ui 404
  check-curl-proxy-code /metrics 404
  check-curl-proxy-code /api/ui 404
  if [[ -n "${version}" ]]; then
    check-curl-proxy-code /api/${version}/namespaces 200
  fi
  stop-proxy

  # Custom paths let you see everything.
  start-proxy --api-prefix=/custom
  check-curl-proxy-code /custom/ui 301
  check-curl-proxy-code /custom/metrics 200
  if [[ -n "${version}" ]]; then
    check-curl-proxy-code /custom/api/${version}/namespaces 200
  fi
  stop-proxy

  ###########################
  # POD creation / deletion #
  ###########################

  kube::log::status "Testing kubectl(${version}:pods)"

  ### Create POD valid-pod from JSON
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod POD is running
  kubectl get "${kube_flags[@]}" pods -o json
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
  kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
  kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
  # Repeat the test above using a jsonpath template
  kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
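
  # The Go-template and JSONPath asserts above are two spellings of the same
  # query; roughly (illustrative comparison, not executed):
  #   template: {{range.items}}{{.metadata.name}}{{end}}
  #   jsonpath: {.items[*].metadata.name}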

  # Describe command should print detailed information
  kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image(s):" "Node:" "Labels:" "Status:" "Replication Controllers"
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image(s):" "Node:" "Labels:" "Status:" "Replication Controllers"

  ### Dump current valid-pod POD
  output_pod=$(kubectl get pod valid-pod -o yaml --output-version=v1 "${kube_flags[@]}")

  ### Delete POD valid-pod by id
  # Pre-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
  # Post-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create POD valid-pod from dumped YAML
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  echo "${output_pod}" | kubectl create -f - "${kube_flags[@]}"
  # Post-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete POD valid-pod from JSON
  # Pre-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0
  # Post-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create POD valid-pod from JSON
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete POD valid-pod with label
  # Pre-condition: valid-pod POD is running
  kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
  # Command
  kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0
  # Post-condition: no POD is running
  kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''

  ### Create POD valid-pod from JSON
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Deleting PODs with no parameter must not kill everything
  # Pre-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  ! kubectl delete pods "${kube_flags[@]}"
  # Post-condition: valid-pod POD is still running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Deleting PODs with both --all and a label selector is not permitted
  # Pre-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  ! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
  # Post-condition: valid-pod POD is still running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete all PODs
  # Pre-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 # --all removes all the pods
  # Post-condition: no POD is running
  kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''

  ### Create two PODs
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  kubectl create -f examples/redis/redis-proxy.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod and redis-proxy PODs are running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'

  ### Delete multiple PODs at once
  # Pre-condition: valid-pod and redis-proxy PODs are running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
  # Command
  kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # delete multiple pods at once
  # Post-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two PODs
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  kubectl create -f examples/redis/redis-proxy.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod and redis-proxy PODs are running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'

  ### Stop multiple PODs at once
  # Pre-condition: valid-pod and redis-proxy PODs are running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
  # Command
  kubectl stop pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # stop multiple pods at once
  # Post-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create valid-pod POD
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Label the valid-pod POD
  # Pre-condition: valid-pod is not labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:'
  # Command
  kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
  # Post-condition: valid-pod is labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'

  ### Delete POD by label
  # Pre-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 "${kube_flags[@]}"
  # Post-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create valid-pod POD
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ## Patch pod can change image
  # Command
  kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
  # Post-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'

  ## Patch pod from JSON can change image
  # Command
  kubectl patch "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "kubernetes/pause"}]}}'
  # Post-condition: valid-pod POD has image kubernetes/pause
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'kubernetes/pause:'
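
  # The patches above are strategic merge patches: the containers list is
  # merged on its "name" key, so only the named container's image is updated.
  # A sketch for a second container ("sidecar" is a hypothetical name):
  #   kubectl patch pod valid-pod -p='{"spec":{"containers":[{"name": "sidecar", "image": "nginx"}]}}'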

  ## --force replace pod can change other fields, e.g. spec.containers[0].name
  # Command
  kubectl get "${kube_flags[@]}" pod valid-pod -o json | sed 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > tmp-valid-pod.json
  kubectl replace "${kube_flags[@]}" --force -f tmp-valid-pod.json
  # Post-condition: spec.containers[0].name = "replaced-k8s-serve-hostname"
  kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
  rm tmp-valid-pod.json

  ### Overwriting an existing label is not permitted
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  ! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
  # Post-condition: name is still valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'

  ### --overwrite must be used to overwrite an existing label; it can be applied to all resources
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
  # Post-condition: name is valid-pod-super-sayan
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'

  ### Delete POD by label
  # Pre-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 "${kube_flags[@]}"
  # Post-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two PODs from 1 yaml file
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f docs/user-guide/multi-pod.yaml "${kube_flags[@]}"
  # Post-condition: redis-master and redis-proxy PODs are running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'

  ### Delete two PODs from 1 yaml file
  # Pre-condition: redis-master and redis-proxy PODs are running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'
  # Command
  kubectl delete -f docs/user-guide/multi-pod.yaml "${kube_flags[@]}"
  # Post-condition: no PODs are running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ##############
  # Namespaces #
  ##############

  ### Create POD valid-pod in specific namespace
  # Pre-condition: no POD is running
  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create "${kube_flags[@]}" --namespace=other -f docs/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod POD is running
  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete POD valid-pod in specific namespace
  # Pre-condition: valid-pod POD is running
  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
  # Post-condition: no POD is running
  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''

  #################
  # Pod templates #
  #################

  ### Create PODTEMPLATE
  # Pre-condition: no PODTEMPLATE exists
  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
  # Command
  kubectl create -f docs/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
  # Post-condition: nginx PODTEMPLATE is available
  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'

  ### Printing pod templates works
  kubectl get podtemplates "${kube_flags[@]}"
  [[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]

  ### Delete nginx pod template by name
  # Pre-condition: nginx pod template is available
  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
  # Command
  kubectl delete podtemplate nginx "${kube_flags[@]}"
  # Post-condition: no templates exist
  kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''

  ############
  # Services #
  ############

  kube::log::status "Testing kubectl(${version}:services)"

  ### Create redis-master service from JSON
  # Pre-condition: Only the default kubernetes service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  # Command
  kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
  # Post-condition: redis-master service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  # Describe command should print detailed information
  kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"

  ### Dump current redis-master service
  output_service=$(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}")

  ### Delete redis-master service by id
  # Pre-condition: redis-master service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  # Command
  kubectl delete service redis-master "${kube_flags[@]}"
  # Post-condition: Only the default kubernetes service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'

  ### Create redis-master service from dumped JSON
  # Pre-condition: Only the default kubernetes service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  # Command
  echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
  # Post-condition: redis-master service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'

  ### Create service-${version}-test service
  # Pre-condition: redis-master service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  # Command
  kubectl create -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "service-${version}-test"
  },
  "spec": {
    "ports": [
      {
        "protocol": "TCP",
        "port": 80,
        "targetPort": 80
      }
    ]
  }
}
__EOF__
  # Post-condition: service-${version}-test service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'

  ### Identity: get | replace should round-trip cleanly
  kubectl get service "${kube_flags[@]}" service-${version}-test -o json | kubectl replace "${kube_flags[@]}" -f -
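
  # The identity check verifies that "get -o json" emits a complete object
  # that "replace" accepts unchanged; the same round-trip works for any
  # resource (illustrative sketch):
  #   kubectl get <kind> <name> -o json "${kube_flags[@]}" | kubectl replace "${kube_flags[@]}" -f -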

  ### Delete services by id
  # Pre-condition: redis-master and service-${version}-test services are running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
  # Command
  kubectl delete service redis-master "${kube_flags[@]}"
  kubectl delete service "service-${version}-test" "${kube_flags[@]}"
  # Post-condition: Only the default kubernetes service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'

  ### Create two services
  # Pre-condition: Only the default kubernetes service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  # Command
  kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
  kubectl create -f examples/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
  # Post-condition: redis-master and redis-slave services are running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'

  ### Delete multiple services at once
  # Pre-condition: redis-master and redis-slave services are running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
  # Command
  kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
  # Post-condition: Only the default kubernetes service is running
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'

  ###########################
  # Replication controllers #
  ###########################

  kube::log::status "Testing kubectl(${version}:replicationcontrollers)"

  ### Create and stop controller, make sure it doesn't leak pods
  # Pre-condition: no replication controller is running
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
  kubectl stop rc frontend "${kube_flags[@]}"
  # Post-condition: no pods from frontend controller
  kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create replication controller frontend from JSON
  # Pre-condition: no replication controller is running
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
  # Post-condition: frontend replication controller is running
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Describe command should print detailed information
  kube::test::describe_object_assert rc 'frontend' "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rc "Name:" "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"

  ### Scale replication controller frontend with current-replicas and replicas
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  # Command
  kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
  # Post-condition: 2 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'

  ### Scale replication controller frontend with (wrong) current-replicas and replicas
  # Pre-condition: 2 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  # Command
  ! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
  # Post-condition: still 2 replicas; nothing changed
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'

  ### Scale replication controller frontend with replicas only
  # Pre-condition: 2 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  # Command
  kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
  # Post-condition: 3 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'

  ### Scale replication controller from JSON with replicas only
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  # Command
  kubectl scale --replicas=2 -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
  # Post-condition: 2 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'

  ### Scale multiple replication controllers
  kubectl create -f examples/guestbook/redis-master-controller.yaml "${kube_flags[@]}"
  kubectl create -f examples/guestbook/redis-slave-controller.yaml "${kube_flags[@]}"
  # Command
  kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
  # Post-condition: 4 replicas each
  kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
  kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
  # Clean-up
  kubectl delete rc redis-{master,slave} "${kube_flags[@]}"

  ### Expose replication controller as service
  # Pre-condition: 2 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  # Command
  kubectl expose rc frontend --port=80 "${kube_flags[@]}"
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
  # Command
  kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
  # Command
  kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
  # Create a service using the service/v1 generator
  kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
  # Post-condition: service exists and the port is named default.
  kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
  # Verify that expose service works without specifying a port.
  kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
  # Post-condition: service exists with the same port as the original service.
  kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
  # Cleanup services
  kubectl delete pod valid-pod "${kube_flags[@]}"
  kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"

  ### Perform a rolling update with --image
  # Command
  kubectl rolling-update frontend --image=kubernetes/pause --update-period=10ns --poll-interval=10ms "${kube_flags[@]}"
  # Post-condition: current image IS kubernetes/pause
  kube::test::get_object_assert 'rc frontend' '{{range \$c:=$rc_container_image_field}} {{\$c.image}} {{end}}' ' +kubernetes/pause +'
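
  # The template above is single-quoted because the assertion helper evaluates
  # its arguments later: at that point $rc_container_image_field expands while
  # \$c survives as the Go-template variable $c. The expected ' +kubernetes/pause +'
  # is a pattern tolerating the spaces the range loop prints around the image.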

  ### Delete replication controller with id
  # Pre-condition: frontend replication controller is running
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Command
  kubectl stop rc frontend "${kube_flags[@]}"
  # Post-condition: no replication controller is running
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two replication controllers
  # Pre-condition: no replication controller is running
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
  kubectl create -f examples/guestbook/redis-slave-controller.yaml "${kube_flags[@]}"
  # Post-condition: frontend and redis-slave controllers are running
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'

  ### Delete multiple controllers at once
  # Pre-condition: frontend and redis-slave controllers are running
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
  # Command
  kubectl stop rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
  # Post-condition: no replication controller is running
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''

  ######################
  # Persistent Volumes #
  ######################

  ### Create and delete persistent volume examples
  # Pre-condition: no persistent volumes currently exist
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f docs/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
  kubectl delete pv pv0001 "${kube_flags[@]}"
  kubectl create -f docs/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
  kubectl delete pv pv0002 "${kube_flags[@]}"
  kubectl create -f docs/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
  kubectl delete pv pv0003 "${kube_flags[@]}"
  # Post-condition: no PVs exist
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''

  ############################
  # Persistent Volume Claims #
  ############################

  ### Create and delete persistent volume claim examples
  # Pre-condition: no persistent volume claims currently exist
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f docs/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
  kubectl delete pvc myclaim-1 "${kube_flags[@]}"

  kubectl create -f docs/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
  kubectl delete pvc myclaim-2 "${kube_flags[@]}"

  kubectl create -f docs/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
  kubectl delete pvc myclaim-3 "${kube_flags[@]}"
  # Post-condition: no PVCs exist
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''

  #########
  # Nodes #
  #########

  kube::log::status "Testing kubectl(${version}:nodes)"

  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'

  kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"

  ### kubectl patch update can mark node unschedulable
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
  # Post-condition: node is unschedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
  kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
  # Post-condition: node is schedulable again
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  #####################
  # Retrieve multiple #
  #####################

  kube::log::status "Testing kubectl(${version}:multiget)"
  kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'

  #####################
  # Resource aliasing #
  #####################

  kube::log::status "Testing resource aliasing"
  kubectl create -f examples/cassandra/cassandra-controller.yaml "${kube_flags[@]}"
  kubectl scale rc cassandra --replicas=1 "${kube_flags[@]}"
  kubectl create -f examples/cassandra/cassandra-service.yaml "${kube_flags[@]}"
  kube::test::get_object_assert "all -l'name=cassandra'" "{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}" 'cassandra:cassandra:cassandra:'
  kubectl delete all -l name=cassandra "${kube_flags[@]}"

  ###########
  # Swagger #
  ###########

  if [[ -n "${version}" ]]; then
    # Verify schema
    file="${KUBE_TEMP}/schema-${version}.json"
    curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/${version}" > "${file}"
    [[ "$(grep "list of returned" "${file}")" ]]
    [[ "$(grep "list of pods" "${file}")" ]]
    [[ "$(grep "watch for changes to the described resources" "${file}")" ]]
  fi

  kube::test::clear_all
}

kube_api_versions=(
  ""
  v1
)
for version in "${kube_api_versions[@]}"; do
  KUBE_API_VERSIONS="v1" runTests "${version}"
done

kube::log::status "TEST PASSED"