mirror of https://github.com/k3s-io/k3s
Test jobs/hpas in storage update
parent 512cc08929
commit 156c1f3c5d
@@ -46,6 +46,8 @@ declare -a resources=(
  "resourcequotas"
  "secrets"
  "services"
  "jobs"
  "horizontalpodautoscalers"
)

# Find all the namespaces.
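The two new entries are the plural resource names for the batch Job and autoscaling HorizontalPodAutoscaler kinds, so the update script now lists and rewrites those objects as well. A minimal sanity check against a running cluster (a sketch, assuming kubectl is configured; not part of the commit):

    # List the two newly covered resource kinds across all namespaces.
    kubectl get jobs --all-namespaces
    kubectl get horizontalpodautoscalers --all-namespaces   # "hpa" is the accepted short name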
@@ -55,11 +57,25 @@ then
  echo "Unexpected: No namespace found. Nothing to do."
  exit 1
fi

all_failed=1

for resource in "${resources[@]}"
do
  for namespace in "${namespaces[@]}"
  do
    # If get fails, assume it's because the resource hasn't been installed in the apiserver.
    # TODO hopefully we can remove this once we use dynamic discovery of gettable/updateable
    # resources.
    set +e
    instances=( $("${KUBECTL}" get "${resource}" --namespace="${namespace}" -o go-template="{{range.items}}{{.metadata.name}} {{end}}"))
    result=$?
    set -e

    if [[ "${all_failed}" -eq 1 && "${result}" -eq 0 ]]; then
      all_failed=0
    fi

    # Nothing to do if there is no instance of that resource.
    if [[ -z "${instances:-}" ]]
    then

@@ -107,6 +123,11 @@ do
  done
done

if [[ "${all_failed}" -eq 1 ]]; then
  echo "kubectl get failed for all resources"
  exit 1
fi

echo "All objects updated successfully!!"

exit 0
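The loop above now tolerates kubectl get failing for a resource this apiserver does not serve and only aborts, via all_failed, when every resource fails. A minimal standalone sketch of the same capture pattern, using jobs in the default namespace purely as an illustration:

    #!/usr/bin/env bash
    # Sketch: capture kubectl's exit code without tripping `set -e`, then read
    # the names emitted by the go-template into a bash array.
    set -e
    KUBECTL=${KUBECTL:-kubectl}

    set +e
    instances=( $("${KUBECTL}" get jobs --namespace=default \
        -o go-template="{{range.items}}{{.metadata.name}} {{end}}") )
    result=$?
    set -e

    if [[ "${result}" -ne 0 ]]; then
      echo "jobs is not served by this apiserver; skipping"
    elif [[ -z "${instances:-}" ]]; then
      echo "no jobs found in namespace default"
    else
      echo "found: ${instances[*]}"
    fi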
@@ -29,11 +29,13 @@ KUBE_OLD_API_VERSION=${KUBE_OLD_API_VERSION:-"v1"}
# The new api version
KUBE_NEW_API_VERSION=${KUBE_NEW_API_VERSION:-"v1"}

KUBE_OLD_STORAGE_VERSIONS=${KUBE_OLD_STORAGE_VERSIONS:-""}
KUBE_NEW_STORAGE_VERSIONS=${KUBE_NEW_STORAGE_VERSIONS:-""}

ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-4001}
API_PORT=${API_PORT:-8080}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_PORT=${KUBELET_PORT:-10250}
KUBE_API_VERSIONS=""
RUNTIME_CONFIG=""

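These assignments use bash default expansion so callers can override any of them from the environment; a standalone sketch of the behavior (the variable name is illustrative):

    # Sketch: ${VAR:-default} keeps an already-set value and otherwise falls back
    # to the default. The name inside the braces must match exactly (it is
    # case-sensitive), so a typo there silently yields the default every time.
    unset DEMO_ETCD_PORT
    echo "${DEMO_ETCD_PORT:-4001}"   # prints 4001
    DEMO_ETCD_PORT=2379
    echo "${DEMO_ETCD_PORT:-4001}"   # prints 2379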
@@ -41,23 +43,25 @@ KUBECTL="${KUBE_OUTPUT_HOSTBIN}/kubectl"
UPDATE_ETCD_OBJECTS_SCRIPT="${KUBE_ROOT}/cluster/update-storage-objects.sh"

function startApiServer() {
  kube::log::status "Starting kube-apiserver with KUBE_API_VERSIONS: ${KUBE_API_VERSIONS} and runtime-config: ${RUNTIME_CONFIG}"
  local storage_versions=${1:-""}
  kube::log::status "Starting kube-apiserver with KUBE_API_VERSIONS: ${KUBE_API_VERSIONS}"
  kube::log::status " and runtime-config: ${RUNTIME_CONFIG}"
  kube::log::status " and storage-version overrides: ${storage_versions}"

  KUBE_API_VERSIONS="${KUBE_API_VERSIONS}" \
    "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
    --address="127.0.0.1" \
    --public-address-override="127.0.0.1" \
    --port="${API_PORT}" \
    --insecure-bind-address="${API_HOST}" \
    --bind-address="${API_HOST}" \
    --insecure-port="${API_PORT}" \
    --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
    --public-address-override="127.0.0.1" \
    --kubelet-port=${KUBELET_PORT} \
    --runtime-config="${RUNTIME_CONFIG}" \
    --cert-dir="${TMPDIR:-/tmp/}" \
    --service-cluster-ip-range="10.0.0.0/24" 1>&2 &
    --service-cluster-ip-range="10.0.0.0/24" \
    --storage-versions="${storage_versions}" 1>&2 &
  APISERVER_PID=$!

  # url, prefix, wait, times
  kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver: " 1 120
  kube::util::wait_for_url "http://${API_HOST}:${API_PORT}/healthz" "apiserver: " 1 120
}

function killApiServer() {
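startApiServer backgrounds kube-apiserver and then blocks until /healthz answers. The real wait lives in kube::util::wait_for_url; the standalone function below is only a sketch of the same idea (poll with a retry budget), not that helper's actual implementation:

    # Sketch: poll a URL once per second until it responds or retries run out.
    wait_for_healthz() {
      local url=$1 times=${2:-120}
      local i
      for ((i = 0; i < times; i++)); do
        if curl -fs "${url}" > /dev/null; then
          return 0
        fi
        sleep 1
      done
      echo "timed out waiting for ${url}" >&2
      return 1
    }

    # e.g. wait_for_healthz "http://127.0.0.1:8080/healthz"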
@@ -80,25 +84,54 @@ function cleanup() {

trap cleanup EXIT SIGINT

"${KUBE_ROOT}/hack/build-go.sh" cmd/kube-apiserver

kube::etcd::start

kube::log::status "Running test for update etcd object scenario"
### BEGIN TEST DEFINITION CUSTOMIZATION ###

"${KUBE_ROOT}/hack/build-go.sh" cmd/kube-apiserver
# source_file,resource,namespace,name,old_version,new_version
tests=(
  docs/user-guide/job.yaml,jobs,default,pi,extensions/v1beta1,batch/v1
  docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml,horizontalpodautoscalers,default,php-apache,extensions/v1beta1,autoscaling/v1
)

# need to include extensions/v1beta1 in new api version because its internal types are used by jobs
# and hpas
KUBE_OLD_API_VERSION="v1,extensions/v1beta1"
KUBE_NEW_API_VERSION="v1,extensions/v1beta1,batch/v1,autoscaling/v1"
KUBE_OLD_STORAGE_VERSIONS="batch=extensions/v1beta1,autoscaling=extensions/v1beta1"
KUBE_NEW_STORAGE_VERSIONS="batch/v1,autoscaling/v1"

### END TEST DEFINITION CUSTOMIZATION ###

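Each tests entry is a comma-separated tuple in the field order given by the comment above, and the loops that follow split it with read -ra. A standalone sketch using the first tuple:

    # Sketch: split one test tuple into its named fields.
    test="docs/user-guide/job.yaml,jobs,default,pi,extensions/v1beta1,batch/v1"
    IFS=',' read -ra test_data <<<"${test}"
    echo "source_file=${test_data[0]}"
    echo "resource=${test_data[1]} namespace=${test_data[2]} name=${test_data[3]}"
    echo "old_version=${test_data[4]} new_version=${test_data[5]}"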
#######################################################
# Step 1: Start a server which supports both the old and new api versions,
# but KUBE_OLD_API_VERSION is the latest (storage) version.
#######################################################

KUBE_API_VERSIONS="${KUBE_OLD_API_VERSION},${KUBE_NEW_API_VERSION}"
RUNTIME_CONFIG="api/all=false,api/${KUBE_OLD_API_VERSION}=true,api/${KUBE_NEW_API_VERSION}=true"
startApiServer
startApiServer ${KUBE_OLD_STORAGE_VERSIONS}

# Create a pod
kube::log::status "Creating a pod"
${KUBECTL} create -f docs/user-guide/pod.yaml

# Create object(s)
for test in ${tests[@]}; do
  IFS=',' read -ra test_data <<<"$test"
  source_file=${test_data[0]}

  kube::log::status "Creating ${source_file}"
  ${KUBECTL} create -f "${source_file}"

  # Verify that the storage version is the old version
  resource=${test_data[1]}
  namespace=${test_data[2]}
  name=${test_data[3]}
  old_storage_version=${test_data[4]}

  kube::log::status "Verifying ${resource}/${namespace}/${name} has storage version ${old_storage_version} in etcd"
  curl -s http://${ETCD_HOST}:${ETCD_PORT}/v2/keys/registry/${resource}/${namespace}/${name} | grep ${old_storage_version}
done

killApiServer

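The curl-and-grep check works because, with the etcd v2 keys API used here, the stored value is the JSON-serialized object and its apiVersion field names the storage version. A hedged sketch of inspecting one object by hand (host, port, and key mirror the defaults and the pi job above; the grep pattern and the exact response shape are only illustrative):

    # Sketch: fetch the raw stored value for the pi job and show its apiVersion.
    curl -s "http://127.0.0.1:4001/v2/keys/registry/jobs/default/pi" \
      | grep -o 'apiVersion[^,]*'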
@@ -110,13 +143,25 @@ killApiServer

KUBE_API_VERSIONS="${KUBE_NEW_API_VERSION},${KUBE_OLD_API_VERSION}"
RUNTIME_CONFIG="api/all=false,api/${KUBE_OLD_API_VERSION}=true,api/${KUBE_NEW_API_VERSION}=true"
startApiServer
startApiServer ${KUBE_NEW_STORAGE_VERSIONS}

# Update etcd objects, so that they are now stored in the new api version.
kube::log::status "Updating storage versions in etcd"
${UPDATE_ETCD_OBJECTS_SCRIPT}

killApiServer
# Verify that the storage version was changed in etcd
for test in ${tests[@]}; do
  IFS=',' read -ra test_data <<<"$test"
  resource=${test_data[1]}
  namespace=${test_data[2]}
  name=${test_data[3]}
  new_storage_version=${test_data[5]}

  kube::log::status "Verifying ${resource}/${namespace}/${name} has updated storage version ${new_storage_version} in etcd"
  curl -s http://${ETCD_HOST}:${ETCD_PORT}/v2/keys/registry/${resource}/${namespace}/${name} | grep ${new_storage_version}
done

killApiServer

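Conceptually, the update script rewrites every object in place so the apiserver re-serializes it at its currently configured (new) storage version. A single-object sketch of that idea (illustrative only; the real script iterates over all resources and namespaces, as shown in the first file of this commit):

    # Sketch: read an object back and write it unchanged; the apiserver stores
    # it again using the storage version it is currently configured with.
    ${KUBECTL} get jobs pi --namespace=default -o json | ${KUBECTL} replace -f -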

#######################################################
@@ -128,9 +173,17 @@ RUNTIME_CONFIG="api/all=false,api/${KUBE_NEW_API_VERSION}=true"

# This seems to reduce flakiness.
sleep 1
startApiServer
startApiServer ${KUBE_NEW_STORAGE_VERSIONS}

# Verify that the server is able to read the object.
# This will fail if the object is in a version that is not understood by the
# master.
${KUBECTL} get pods
for test in ${tests[@]}; do
  IFS=',' read -ra test_data <<<"$test"
  resource=${test_data[1]}
  namespace=${test_data[2]}
  name=${test_data[3]}

  # Verify that the server is able to read the object.
  kube::log::status "Verifying we can retrieve ${resource}/${namespace}/${name} via kubectl"
  ${KUBECTL} get --namespace=${namespace} ${resource}/${name}
done

killApiServer

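For the two objects defined in the tests array, the readback loop above boils down to commands like the following sketch. The explicit -s flag pointing at the local apiserver is an assumption added for clarity, mirroring the API_HOST/API_PORT defaults; the script's own kubectl invocation may resolve the server differently:

    # Sketch: fetch each migrated object through the API the same way the loop does.
    "${KUBE_OUTPUT_HOSTBIN}/kubectl" -s "http://127.0.0.1:8080" get --namespace=default jobs/pi
    "${KUBE_OUTPUT_HOSTBIN}/kubectl" -s "http://127.0.0.1:8080" get --namespace=default horizontalpodautoscalers/php-apache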
@@ -93,6 +93,9 @@ hack/jenkins/update-jobs.sh: # jenkins_jobs.ini contains administrative credent
hack/jenkins/update-jobs.sh: if [[ -e jenkins_jobs.ini ]]; then
hack/local-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}"
hack/local-up-cluster.sh: runtime_config=""
hack/test-update-storage-objects.sh: local storage_versions=${1:-""}
hack/test-update-storage-objects.sh: source_file=${test_data[0]}
hack/test-update-storage-objects.sh:# source_file,resource,namespace,name,old_version,new_version
pkg/kubelet/network/hairpin/hairpin.go: hairpinModeRelativePath = "hairpin_mode"
pkg/kubelet/qos/memory_policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOOMScoreAdj, test.highOOMScoreAdj, oomScoreAdj)
pkg/kubelet/qos/memory_policy_test.go: highOOMScoreAdj int // The min oom_score_adj score the container should be assigned.