Switch from Google Buckets to AWS S3 Buckets (#6497)

* Add python pip package to install aws cli

Signed-off-by: Venkata Krishna Rohit Sakala <rohitsakala@gmail.com>

* Upload build artifacts to aws s3 instead of gcp bucket

Signed-off-by: Venkata Krishna Rohit Sakala <rohitsakala@gmail.com>

* Upload logs to aws s3 instead of google buckets

Signed-off-by: Venkata Krishna Rohit Sakala <rohitsakala@gmail.com>

* Replace gcloud auth with aws credentials for artifact uploading to buckets

Signed-off-by: Venkata Krishna Rohit Sakala <rohitsakala@gmail.com>

* Replace usage of google bucket with aws s3 buckets

Signed-off-by: Venkata Krishna Rohit Sakala <rohitsakala@gmail.com>

Signed-off-by: Venkata Krishna Rohit Sakala <rohitsakala@gmail.com>
Branch: pull/6567/head
Author: Sakala Venkata Krishna Rohit (committed by GitHub, 2 years ago)
Parent: a07bb555ba
Commit: 4e2e91e089
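The core of the change is replacing the gsutil/boto upload path with the AWS CLI, which reads its credentials straight from the environment. A minimal before/after sketch of the upload call (bucket names are taken from the diff below; the artifact path and secret values are illustrative):

# Before: gsutil, authenticated through a GCLOUD_AUTH service-account JSON
#   gsutil cp ./dist/artifacts/k3s gs://k3s-ci-builds
# After: the AWS CLI picks up AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY automatically
export AWS_ACCESS_KEY_ID="<from Drone secret>"        # illustrative placeholder
export AWS_SECRET_ACCESS_KEY="<from Drone secret>"    # illustrative placeholder
aws s3 cp ./dist/artifacts/k3s s3://k3s-ci-builds/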

@@ -9,12 +9,14 @@ platform:
steps:
- name: build
image: rancher/dapper:v0.5.0
secrets: [ gcloud_auth, unprivileged_github_token ]
secrets: [ AWS_SECRET_ACCESS_KEY-artifact-uploader, AWS_ACCESS_KEY_ID-artifact-uploader, unprivileged_github_token ]
environment:
GITHUB_TOKEN:
from_secret: unprivileged_github_token
GCLOUD_AUTH:
from_secret: gcloud_auth
AWS_SECRET_ACCESS_KEY:
from_secret: AWS_SECRET_ACCESS_KEY-artifact-uploader
AWS_ACCESS_KEY_ID:
from_secret: AWS_ACCESS_KEY_ID-artifact-uploader
commands:
- dapper ci
- echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
@@ -88,15 +90,17 @@ steps:
- name: test
image: rancher/dapper:v0.5.0
secrets: [ gcloud_auth ]
secrets: [ AWS_SECRET_ACCESS_KEY-artifact-uploader, AWS_ACCESS_KEY_ID-artifact-uploader ]
environment:
ENABLE_REGISTRY: 'true'
GCLOUD_AUTH:
from_secret: gcloud_auth
AWS_SECRET_ACCESS_KEY:
from_secret: AWS_SECRET_ACCESS_KEY-artifact-uploader
AWS_ACCESS_KEY_ID:
from_secret: AWS_ACCESS_KEY_ID-artifact-uploader
commands:
- docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
- >
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e GCLOUD_AUTH -e SONOBUOY_VERSION -e ENABLE_REGISTRY
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY
-v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
volumes:
- name: docker
@@ -135,10 +139,12 @@ platform:
steps:
- name: build
image: rancher/dapper:v0.5.0
secrets: [ gcloud_auth ]
secrets: [ AWS_SECRET_ACCESS_KEY-artifact-uploader, AWS_ACCESS_KEY_ID-artifact-uploader ]
environment:
GCLOUD_AUTH:
from_secret: gcloud_auth
AWS_SECRET_ACCESS_KEY:
from_secret: AWS_SECRET_ACCESS_KEY-artifact-uploader
AWS_ACCESS_KEY_ID:
from_secret: AWS_ACCESS_KEY_ID-artifact-uploader
commands:
- dapper ci
- echo "${DRONE_TAG}-arm64" | sed -e 's/+/-/g' >.tags
@@ -187,15 +193,17 @@ steps:
- name: test
image: rancher/dapper:v0.5.0
secrets: [ gcloud_auth ]
secrets: [ AWS_SECRET_ACCESS_KEY-artifact-uploader, AWS_ACCESS_KEY_ID-artifact-uploader ]
environment:
ENABLE_REGISTRY: 'true'
GCLOUD_AUTH:
from_secret: gcloud_auth
AWS_SECRET_ACCESS_KEY:
from_secret: AWS_SECRET_ACCESS_KEY-artifact-uploader
AWS_ACCESS_KEY_ID:
from_secret: AWS_ACCESS_KEY_ID-artifact-uploader
commands:
- docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
- >
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e GCLOUD_AUTH -e SONOBUOY_VERSION -e ENABLE_REGISTRY
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY
-v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
volumes:
- name: docker
@@ -217,10 +225,12 @@ platform:
steps:
- name: build
image: rancher/dapper:v0.5.0
secrets: [ gcloud_auth ]
secrets: [ AWS_SECRET_ACCESS_KEY-artifact-uploader, AWS_ACCESS_KEY_ID-artifact-uploader ]
environment:
GCLOUD_AUTH:
from_secret: gcloud_auth
AWS_SECRET_ACCESS_KEY:
from_secret: AWS_SECRET_ACCESS_KEY-artifact-uploader
AWS_ACCESS_KEY_ID:
from_secret: AWS_ACCESS_KEY_ID-artifact-uploader
commands:
- dapper ci
- echo "${DRONE_TAG}-arm" | sed -e 's/+/-/g' >.tags
@@ -269,15 +279,17 @@ steps:
- name: test
image: rancher/dapper:v0.5.0
secrets: [ gcloud_auth ]
secrets: [ AWS_SECRET_ACCESS_KEY-artifact-uploader, AWS_ACCESS_KEY_ID-artifact-uploader ]
environment:
ENABLE_REGISTRY: 'true'
GCLOUD_AUTH:
from_secret: gcloud_auth
AWS_SECRET_ACCESS_KEY:
from_secret: AWS_SECRET_ACCESS_KEY-artifact-uploader
AWS_ACCESS_KEY_ID:
from_secret: AWS_ACCESS_KEY_ID-artifact-uploader
commands:
- docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
- >
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e GCLOUD_AUTH -e SONOBUOY_VERSION -e ENABLE_REGISTRY
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY
-v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
volumes:
- name: docker
@@ -313,10 +325,12 @@ steps:
- name: build
image: rancher/dapper:v0.5.8
secrets: [ gcloud_auth ]
secrets: [ AWS_SECRET_ACCESS_KEY-artifact-uploader, AWS_ACCESS_KEY_ID-artifact-uploader ]
environment:
GCLOUD_AUTH:
from_secret: gcloud_auth
AWS_SECRET_ACCESS_KEY:
from_secret: AWS_SECRET_ACCESS_KEY-artifact-uploader
AWS_ACCESS_KEY_ID:
from_secret: AWS_ACCESS_KEY_ID-artifact-uploader
commands:
- dapper ci
- echo "${DRONE_TAG}-s390x" | sed -e 's/+/-/g' >.tags
@@ -368,16 +382,18 @@ steps:
- name: test
image: rancher/dapper:v0.5.8
secrets: [ gcloud_auth ]
secrets: [ AWS_SECRET_ACCESS_KEY-artifact-uploader, AWS_ACCESS_KEY_ID-artifact-uploader ]
environment:
ENABLE_REGISTRY: 'true'
GCLOUD_AUTH:
from_secret: gcloud_auth
AWS_SECRET_ACCESS_KEY:
from_secret: AWS_SECRET_ACCESS_KEY-artifact-uploader
AWS_ACCESS_KEY_ID:
from_secret: AWS_ACCESS_KEY_ID-artifact-uploader
commands:
# we hardcode s390x as the arch because DRONE_STAGE_ARCH is set to amd64
- docker build --target test-k3s -t k3s:test-s390x-${DRONE_COMMIT} -f Dockerfile.test .
- >
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e GCLOUD_AUTH -e SONOBUOY_VERSION -e ENABLE_REGISTRY
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY
-v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-s390x-${DRONE_COMMIT}
volumes:
- name: docker
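Because the AWS CLI and SDKs read AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY from the environment, no credentials file has to be written inside the dapper container. A hedged sanity check that could be run in a build step; it is not part of this change:

# Confirm the injected Drone secrets are visible before the upload scripts run
[ -n "$AWS_ACCESS_KEY_ID" ] && [ -n "$AWS_SECRET_ACCESS_KEY" ] \
  || echo "AWS credentials not set; uploads will be skipped" >&2
# Cheap credential check against STS (requires network access)
aws sts get-caller-identity --output text || true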

@@ -9,7 +9,7 @@ ENV https_proxy=$https_proxy
ENV no_proxy=$no_proxy
RUN apk -U --no-cache add bash git gcc musl-dev docker vim less file curl wget ca-certificates jq linux-headers \
zlib-dev tar zip squashfs-tools npm coreutils python3 openssl-dev libffi-dev libseccomp libseccomp-dev \
zlib-dev tar zip squashfs-tools npm coreutils python3 py3-pip openssl-dev libffi-dev libseccomp libseccomp-dev \
libseccomp-static make libuv-static sqlite-dev sqlite-static libselinux libselinux-dev zlib-dev zlib-static \
zstd pigz alpine-sdk binutils-gold btrfs-progs-dev btrfs-progs-static gawk yq \
&& \
@@ -17,6 +17,8 @@ RUN apk -U --no-cache add bash git gcc musl-dev docker vim less file curl wget c
apk -U --no-cache add mingw-w64-gcc; \
fi
RUN python3 -m pip install awscli
RUN if [ "$(go env GOARCH)" = "arm64" ]; then \
wget https://github.com/aquasecurity/trivy/releases/download/v0.25.3/trivy_0.25.3_Linux-ARM64.tar.gz && \
tar -zxvf trivy_0.25.3_Linux-ARM64.tar.gz && \
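The py3-pip package is added so that "python3 -m pip install awscli" works in the Alpine-based dapper image. A hedged way to confirm the CLI landed in the built image (the tag name is illustrative):

# Illustrative tag; checks that the AWS CLI is present in the dapper image
docker build -f Dockerfile.dapper -t k3s-dapper:awscli-check .
docker run --rm k3s-dapper:awscli-check aws --version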

@@ -92,7 +92,7 @@ set -o noglob
# Defaults to 'stable'.
GITHUB_URL=https://github.com/k3s-io/k3s/releases
STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds
STORAGE_URL=https://k3s-ci-builds.s3.amazonaws.com
DOWNLOADER=
# --- helper functions for logs ---

@@ -94,7 +94,7 @@ if [ "$INSTALL_K3S_DEBUG" = "true" ]; then
fi
GITHUB_URL=https://github.com/k3s-io/k3s/releases
STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds
STORAGE_URL=https://k3s-ci-builds.s3.amazonaws.com
DOWNLOADER=
# --- helper functions for logs ---
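STORAGE_URL is only used when the installer is asked for a commit-pinned CI build rather than a GitHub release, so switching it to the S3 virtual-hosted endpoint leaves the install flow otherwise unchanged. A hedged usage example, run from a k3s checkout so the commit hash resolves to a published CI build:

# Install a CI build by commit; the installer resolves it against STORAGE_URL,
# which now points at the S3 bucket instead of storage.googleapis.com
curl -sfL https://get.k3s.io | INSTALL_K3S_COMMIT="$(git rev-parse HEAD)" sh -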

@@ -1,8 +1,13 @@
#!/bin/bash
[ -n "$GCLOUD_AUTH" ] || {
# Check for AWS Credentials
[ -n "$AWS_SECRET_ACCESS_KEY" ] || {
exit 0
}
[ -n "$AWS_ACCESS_KEY_ID" ] || {
exit 0
}
[ -x "$1" ] || {
echo "First argument should be an executable" >&2
exit 1
@@ -23,35 +28,10 @@ cleanup() {
}
trap cleanup EXIT INT
GCLOUD_JSON=${TMPDIR}/.gcloud.json
[ -z "${GCLOUD_AUTH}" ] || echo "${GCLOUD_AUTH}" >${GCLOUD_JSON}
[ -s "${GCLOUD_JSON}" ] || {
echo "gcloud auth not defined" >&2
exit 1
}
BOTO_CONF=${TMPDIR}/.boto
[ -s "${BOTO_CONF}" ] || cat >${BOTO_CONF} <<END
[Credentials]
gs_service_key_file = ${GCLOUD_JSON}
[Boto]
https_validate_certificates = True
[GSUtil]
content_language = en
default_api_version = 2
default_project_id = rancher-dev
END
BUILD_NAME=$(basename $1)-$2
(cd $(dirname $1) && sha256sum $(basename $1)) >${TMPDIR}/${BUILD_NAME}.sha256sum
cp $1 ${TMPDIR}/${BUILD_NAME}
[ -d "${TMPDIR}/gsutil" ] || curl -sfL https://storage.googleapis.com/pub/gsutil.tar.gz | tar xz -C ${TMPDIR}
HOME=${TMPDIR}
PATH=${PATH}:${HOME}/gsutil
gsutil cp ${TMPDIR}/${BUILD_NAME}* gs://k3s-ci-builds || exit 1
aws s3 cp ${TMPDIR}/${BUILD_NAME}* s3://k3s-ci-builds || exit 1
echo "Build uploaded" >&2
echo "https://storage.googleapis.com/k3s-ci-builds/${BUILD_NAME}"
echo "https://k3s-ci-builds.s3.amazonaws.com/${BUILD_NAME}"

@@ -1,5 +1,13 @@
#!/bin/bash
# Check for AWS Credentials
[ -n "$AWS_SECRET_ACCESS_KEY" ] || {
exit 0
}
[ -n "$AWS_ACCESS_KEY_ID" ] || {
exit 0
}
[ -d "$1" ] || {
echo "First argument should be a directory" >&2
exit 1
@@ -18,33 +26,10 @@ cleanup() {
}
trap cleanup EXIT INT
GCLOUD_JSON=${TMPDIR}/.gcloud.json
[ -z "${GCLOUD_AUTH}" ] || echo "${GCLOUD_AUTH}" >${GCLOUD_JSON}
[ -s "${GCLOUD_JSON}" ] || {
echo "gcloud auth not defined" >&2
exit 1
}
BOTO_CONF=${TMPDIR}/.boto
[ -s "${BOTO_CONF}" ] || cat >${BOTO_CONF} <<END
[Credentials]
gs_service_key_file = ${GCLOUD_JSON}
[Boto]
https_validate_certificates = True
[GSUtil]
content_language = en
default_api_version = 2
default_project_id = rancher-dev
END
[ -d "${TMPDIR}/gsutil" ] || curl -sfL https://storage.googleapis.com/pub/gsutil.tar.gz | tar xz -C ${TMPDIR}
HOME=${TMPDIR}
PATH=$PATH:${HOME}/gsutil
LOG_TGZ=k3s-log-$(date +%s)-$("${GO}" env GOARCH)-$(git rev-parse --short HEAD)-$(basename $1).tgz
tar -cz -f ${TMPDIR}/${LOG_TGZ} -C $(dirname $1) $(basename $1)
gsutil cp ${TMPDIR}/${LOG_TGZ} gs://k3s-ci-logs || exit 1
aws s3 cp ${TMPDIR}/${LOG_TGZ} s3://k3s-ci-logs || exit 1
echo "Logs uploaded" >&2
echo "https://storage.googleapis.com/k3s-ci-logs/${LOG_TGZ}"
echo "https://k3s-ci-logs.s3.amazonaws.com/${LOG_TGZ}"

@@ -3,17 +3,17 @@
iterations=0
curl -s -H 'Accept: application/vnd.github.v3+json' "https://api.github.com/repos/k3s-io/k3s/commits?per_page=5&sha=$1" | jq -r '.[] | .sha' &> $2
# The VMs take time on startup to reach the storage endpoint, wait loop until we can
while ! curl -s --fail https://storage.googleapis.com/k3s-ci-builds > /dev/null; do
while ! curl -s --fail https://k3s-ci-builds.s3.amazonaws.com > /dev/null; do
((iterations++))
if [ "$iterations" -ge 30 ]; then
echo "Unable to hit googleapis.com/k3s-ci-builds"
echo "Unable to hit https://k3s-ci-builds.s3.amazonaws.com"
exit 1
fi
sleep 1
done
iterations=0
curl -s --fail https://storage.googleapis.com/k3s-ci-builds/k3s-$(head -n 1 $2).sha256sum
curl -s --fail https://k3s-ci-builds.s3.amazonaws.com/k3s-$(head -n 1 $2).sha256sum
while [ $? -ne 0 ]; do
((iterations++))
if [ "$iterations" -ge 6 ]; then
@@ -22,5 +22,5 @@ while [ $? -ne 0 ]; do
fi
sed -i 1d "$2"
sleep 1
curl -s --fail https://storage.googleapis.com/k3s-ci-builds/k3s-$(head -n 1 $2).sha256sum
curl -s --fail https://k3s-ci-builds.s3.amazonaws.com/k3s-$(head -n 1 $2).sha256sum
done
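The k3s-<commit> and k3s-<commit>.sha256sum object names carry over from the Google bucket unchanged, so a CI build can still be verified the same way. A hedged sketch, run from a k3s checkout; it saves the binary under its original name so the checksum file's entry matches:

COMMIT="$(git rev-parse HEAD)"   # or paste a full commit sha with a published build
curl -fsSL -o k3s "https://k3s-ci-builds.s3.amazonaws.com/k3s-${COMMIT}"
curl -fsSL -o k3s.sha256sum "https://k3s-ci-builds.s3.amazonaws.com/k3s-${COMMIT}.sha256sum"
sha256sum -c k3s.sha256sum       # the checksum file references the original binary name, k3s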