k3s/tests/docker/test-run-upgrade

#!/bin/bash
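#
# Upgrade test: provision a cluster (one server, one agent) from the most recent
# release on the current channel, then restart the node containers with the
# locally built image, reusing the same datastore volumes, and verify that the
# nodes report the new version and that the workload created before the upgrade
# is still present. Helper functions such as run-test, provision-cluster, and
# cleanup-test-env are assumed to be provided by the shared docker test harness
# that sources this script.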
all_services=(
  coredns
  local-path-provisioner
  metrics-server
  traefik
)
export NUM_SERVERS=1
export NUM_AGENTS=1
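# WAIT_SERVICES is presumably consumed by the test harness, which waits for
# these packaged components to be ready before running the test body.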
export WAIT_SERVICES="${all_services[@]}"
REPO=${REPO:-rancher}
IMAGE_NAME=${IMAGE_NAME:-k3s}
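# Derive the minor-version channel (e.g. v1.26) from VERSION_K8S and ask the
# update service which release that channel currently points at; the last path
# segment of the redirect URL is the version, with '+' rewritten to '-' so it
# matches the Docker image tag.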
CURRENT_CHANNEL=$(echo ${VERSION_K8S} | awk -F. '{print "v1." $2}')
CURRENT_VERSION=$(curl -s https://update.k3s.io/v1-release/channels/${CURRENT_CHANNEL} -o /dev/null -w '%{redirect_url}' | awk -F/ '{print gensub(/\+/, "-", "g", $NF)}')
if [ -z "${CURRENT_VERSION}" ]; then
  CURRENT_VERSION=${VERSION_TAG}
fi
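# Provision the initial cluster from the released image; start-test later
# unsets these so that re-provisioning uses the locally built image instead.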
export K3S_IMAGE_SERVER=${REPO}/${IMAGE_NAME}:${CURRENT_VERSION}${SUFFIX}
export K3S_IMAGE_AGENT=${REPO}/${IMAGE_NAME}:${CURRENT_VERSION}${SUFFIX}
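# The pre-hooks attach named volumes for the datastore, logs, and config so
# that cluster state survives when the node containers are deleted and
# re-provisioned during the upgrade.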
server-pre-hook(){
  local testID=$(basename $TEST_DIR)
  export SERVER_DOCKER_ARGS="\
    --mount type=volume,src=k3s-server-$1-${testID,,}-rancher,dst=/var/lib/rancher/k3s \
    --mount type=volume,src=k3s-server-$1-${testID,,}-log,dst=/var/log \
    --mount type=volume,src=k3s-server-$1-${testID,,}-etc,dst=/etc/rancher"
}
export -f server-pre-hook
agent-pre-hook(){
  local testID=$(basename $TEST_DIR)
  export AGENT_DOCKER_ARGS="\
    --mount type=volume,src=k3s-agent-$1-${testID,,}-rancher,dst=/var/lib/rancher/k3s \
    --mount type=volume,src=k3s-agent-$1-${testID,,}-log,dst=/var/log \
    --mount type=volume,src=k3s-agent-$1-${testID,,}-etc,dst=/etc/rancher"
}
export -f agent-pre-hook
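# start-test is the test body; it is expected to be invoked by the harness
# (run-test) once the initial cluster, running the released image, is up.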
start-test() {
  # Create a pod and print the version before upgrading
  kubectl get node -o wide
  kubectl create -f scripts/airgap/volume-test.yaml

  # Add post-hook sleeps to give the kubelet time to update the version after startup.
  # Server gets an extra 60 seconds to handle the metrics-server service being unavailable:
  # https://github.com/kubernetes/kubernetes/issues/120739
  server-post-hook(){
    sleep 75
  }
  export -f server-post-hook
  agent-post-hook(){
    sleep 15
  }
  export -f agent-post-hook

  # Switch the image back to the current build, delete the node containers, and re-provision with the same datastore volumes
  unset K3S_IMAGE_SERVER
  unset K3S_IMAGE_AGENT
  if [ $NUM_AGENTS -gt 0 ]; then
    for i in $(seq 1 $NUM_AGENTS); do
      docker rm -f -v $(cat $TEST_DIR/agents/$i/metadata/name)
      rm -rf $TEST_DIR/agents/$i
    done
  fi
  for i in $(seq 1 $NUM_SERVERS); do
    docker rm -f -v $(cat $TEST_DIR/servers/$i/metadata/name)
    rm -rf $TEST_DIR/servers/$i
  done
  provision-cluster

  # Confirm that the nodes are running the current build and that the pod we created earlier is still there
  . ./scripts/version.sh || true
  verify-valid-versions $(cat $TEST_DIR/servers/1/metadata/name)
  kubectl get pod -n kube-system volume-test -o wide
  if ! kubectl get node -o wide | grep -qF $VERSION; then
    echo "Expected version $VERSION not found in node list"
    return 1
  fi
}
export -f start-test
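# Remove the named volumes created by the pre-hooks once the test has finished.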
test-cleanup-hook(){
  local testID=$(basename $TEST_DIR)
  docker volume ls -q | grep -F ${testID,,} | xargs -r docker volume rm
}
export -f test-cleanup-hook
# --- create a single-node cluster from the latest release, then restart the containers with the current build
LABEL=UPGRADE run-test
cleanup-test-env