From e4ee2fadf7e737b30b155f4b3cda209f317ee7b4 Mon Sep 17 00:00:00 2001 From: Brad Davidson Date: Thu, 13 Aug 2020 11:23:06 -0700 Subject: [PATCH] Update release-1.16 to v1.16.14-k3s1 Signed-off-by: Brad Davidson --- Dockerfile.dapper | 6 +- Dockerfile.manifest | 2 +- Dockerfile.test.dapper | 4 +- Vagrantfile | 2 +- go.mod | 50 ++--- go.sum | 88 ++++----- package/Dockerfile | 2 +- .../k8s.io/apimachinery/pkg/util/sets/BUILD | 2 +- .../k8s.io/apiserver/pkg/server/healthz/BUILD | 2 +- .../apiserver/pkg/server/healthz/healthz.go | 18 +- vendor/k8s.io/client-go/pkg/version/base.go | 6 +- vendor/k8s.io/cloud-provider/go.sum | 4 +- vendor/k8s.io/component-base/version/base.go | 6 +- vendor/k8s.io/csi-translation-lib/go.sum | 4 +- vendor/k8s.io/kubectl/pkg/version/base.go | 6 +- .../pkg/credentialprovider/provider.go | 13 +- vendor/k8s.io/kubernetes/pkg/version/base.go | 6 +- .../pkg/volume/azure_dd/attacher.go | 2 +- .../pkg/volume/azure_file/azure_file.go | 2 +- .../legacy-cloud-providers/azure/azure.go | 3 + .../azure/azure_vmss.go | 127 ++++++++++-- .../azure/azure_vmss_cache.go | 182 ++++++++++++------ vendor/modules.txt | 42 ++-- 23 files changed, 374 insertions(+), 205 deletions(-) diff --git a/Dockerfile.dapper b/Dockerfile.dapper index e43eaf9eab..930410ab6d 100644 --- a/Dockerfile.dapper +++ b/Dockerfile.dapper @@ -1,9 +1,7 @@ -FROM golang:1.13.11-alpine3.10 +FROM golang:1.13.15-alpine3.12 RUN apk -U --no-cache add bash git gcc musl-dev docker vim less file curl wget ca-certificates jq linux-headers zlib-dev tar zip squashfs-tools npm coreutils \ - python2 python3 py3-pip python3-dev openssl-dev libffi-dev libseccomp libseccomp-dev make libuv-static -RUN pip3 install 'tox==3.6.0' -RUN apk -U --no-cache --repository http://dl-3.alpinelinux.org/alpine/edge/main/ add sqlite-dev sqlite-static + python2 openssl-dev libffi-dev libseccomp libseccomp-dev make libuv-static sqlite-dev sqlite-static libselinux libselinux-dev zlib-dev zlib-static RUN mkdir -p /go/src/golang.org/x && \ cd /go/src/golang.org/x && git clone https://github.com/golang/tools && \ git -C /go/src/golang.org/x/tools checkout -b current aa82965741a9fecd12b026fbb3d3c6ed3231b8f8 && \ diff --git a/Dockerfile.manifest b/Dockerfile.manifest index 45904d7f54..769595c69a 100644 --- a/Dockerfile.manifest +++ b/Dockerfile.manifest @@ -1,4 +1,4 @@ -FROM golang:1.13.6-alpine3.10 +FROM golang:1.13.15-alpine3.12 COPY --from=plugins/manifest:1.2.3 /bin/* /bin/ diff --git a/Dockerfile.test.dapper b/Dockerfile.test.dapper index 088bb59468..fd6e7cc892 100644 --- a/Dockerfile.test.dapper +++ b/Dockerfile.test.dapper @@ -1,4 +1,4 @@ -FROM golang:1.13.6-alpine3.10 +FROM golang:1.13.15-alpine3.12 RUN apk -U --no-cache add bash git gcc musl-dev docker curl jq coreutils python2 openssl @@ -6,7 +6,7 @@ ARG DAPPER_HOST_ARCH ENV ARCH $DAPPER_HOST_ARCH RUN if [ "${ARCH}" == "amd64" ] || [ "${ARCH}" == "arm64" ]; then \ - VERSION=0.17.1 OS=linux && \ + VERSION=0.18.4 OS=linux && \ curl -sL "https://github.com/vmware-tanzu/sonobuoy/releases/download/v${VERSION}/sonobuoy_${VERSION}_${OS}_${ARCH}.tar.gz" | \ tar -xzf - -C /usr/local/bin; \ fi diff --git a/Vagrantfile b/Vagrantfile index 8a962e3656..f44d1de809 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -1,4 +1,4 @@ -BOX = "generic/alpine310" +BOX = "generic/alpine312" HOME = File.dirname(__FILE__) PROJECT = File.basename(HOME) MOUNT_TYPE = ENV['MOUNT_TYPE'] || "nfs" diff --git a/go.mod b/go.mod index 78a72e3772..f19e5c8890 100644 --- a/go.mod +++ b/go.mod @@ -34,31 +34,31 @@ replace ( 
github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a - k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.13-k3s1 - k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.13-k3s1 - k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.13-k3s1 - k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.13-k3s1 - k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.13-k3s1 - k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.13-k3s1 - k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.13-k3s1 - k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.13-k3s1 - k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.13-k3s1 - k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.13-k3s1 - k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.13-k3s1 - k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.13-k3s1 - k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.13-k3s1 - k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.13-k3s1 - k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.13-k3s1 - k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.13-k3s1 - k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.13-k3s1 - k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.13-k3s1 - k8s.io/kubernetes => github.com/rancher/kubernetes v1.16.13-k3s1 - k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.13-k3s1 - k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.13-k3s1 - k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.16.13-k3s1 - k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.13-k3s1 - k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.16.13-k3s1 - k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.16.13-k3s1 + k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.14-k3s1 + k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.14-k3s1 + k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.14-k3s1 + k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.14-k3s1 + k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.14-k3s1 + k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.14-k3s1 + k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.14-k3s1 + 
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.14-k3s1 + k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.14-k3s1 + k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.14-k3s1 + k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.14-k3s1 + k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.14-k3s1 + k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.14-k3s1 + k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.14-k3s1 + k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.14-k3s1 + k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.14-k3s1 + k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.14-k3s1 + k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.14-k3s1 + k8s.io/kubernetes => github.com/rancher/kubernetes v1.16.14-k3s1 + k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.14-k3s1 + k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.14-k3s1 + k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.16.14-k3s1 + k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.14-k3s1 + k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.16.14-k3s1 + k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.16.14-k3s1 mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34 ) diff --git a/go.sum b/go.sum index c6dc0b5445..75681c2e3b 100644 --- a/go.sum +++ b/go.sum @@ -190,6 +190,7 @@ github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= +github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= @@ -407,6 +408,7 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jetstack/cert-manager v0.7.2/go.mod h1:nbddmhjWxYGt04bxvwVGUSeLhZ2PCyNvd7MpXdq+yWY= github.com/jimstudt/http-authentication 
v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= @@ -601,49 +603,49 @@ github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd h1:KPnQ github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd/go.mod h1:QYmg8cqWPPfIbpEuhtJbEdWwA6PEKSY016Z6EdfL9+8= github.com/rancher/kine v0.2.5 h1:UE0HrxloO95zPEXYN/n8Rwejx276fc7s8I5JbJcLdmY= github.com/rancher/kine v0.2.5/go.mod h1:SdBUuE7e3XyrJvdBxCl9TMMapF+wyZnMZSP/H59OqNE= -github.com/rancher/kubernetes v1.16.13-k3s1 h1:jqiwkowG6bokzvYucdV475vTB3zoynAD4ywsOftIdJs= -github.com/rancher/kubernetes v1.16.13-k3s1/go.mod h1:I1/NbQo7HFnBycd5xkM76qBKCQx90j4t4PMD6MbJ5K0= -github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.13-k3s1 h1:GS6aHxaoeMeyN4vuPuGW+d3srsgLUC6VREjmleou+LU= -github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.13-k3s1/go.mod h1:hF711SYP9H3Bqm/pquHb7I9hmYCbyZmz7AZRaXu1rqE= -github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.13-k3s1 h1:X8Rq5hISSLQ/z/fkBXiiHJhttPQw08f1lr8LX/bHTFA= -github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.13-k3s1/go.mod h1:VBfb4GYcC+wWE9En7Qiiy2GtEYHyu7+OF4va++HcNEA= -github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.13-k3s1 h1:DrOpZ2BNQBS95ELh591pmnrCYXBPN6i42Qe2eIoXiyU= -github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.13-k3s1/go.mod h1:DHoqDukC6AO78SjdpZDs4uxWsr8ELOtSgnpfcmuv3cw= -github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.13-k3s1 h1:7vs6HlDgUqEXIxYooHxxPV5gfTe6H1oON7GLWkDE4PY= -github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.13-k3s1/go.mod h1:1HjCjgDtd6sh87IYWH4Mpt6Zucx/CPcRFdw2zxc0Csw= -github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.13-k3s1 h1:oiRexGObV5lhNBGJhNmrrrJb2Dhf6XTXKFyOLpzN45M= -github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.13-k3s1/go.mod h1:VpFACNqiYpPtmofpGz77fAfDTDyOnbvU0rJ2CpM40A4= -github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.13-k3s1 h1:A/9zg8HGH75Os8Y5tRiVLVMqS59YbXVH+8GJy79SH9M= -github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.13-k3s1/go.mod h1:o1JsPLiZ5bL+KsLEe/wHo65emfcWBAsa0hSpWuMRX80= -github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.13-k3s1 h1:e9aWVmT2wY6/iCc0TPGZnpmYBwD2hHuD1z+//W7gIPw= -github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.13-k3s1/go.mod h1:mkOG2NRE3z5O+q3d1mmg3DiltNAqprjQWCJEqS941Sk= -github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.13-k3s1 h1:YaJYT2v7Ps0Yv2laYdkb/RwnY7Wa2JIhAeFScX2+UJk= -github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.13-k3s1/go.mod h1:VRJM3GFPwH5SheYrgtmNLDThbGZV36jQbE4KLzoI9bg= -github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.13-k3s1 h1:3EGI0YkUcav8wwM1Pa8MG0yVLnCRrAKRB97I2oFkNsA= -github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.13-k3s1/go.mod h1:uv2Gaa8WhyS9L9Zx7EUK8aan29bm30xfX3KU/Ch440E= -github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.13-k3s1 h1:xN+3UVHIxNk60J7quJMErRe8gOANPFvE+Ou8DepRRb0= -github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.13-k3s1/go.mod h1:f4FAu7hsUOeHiLm8TTj5sA9RbgPp4cMYiMD7III70Uc= -github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.13-k3s1 h1:d3kkJj9KxWbsicd96Gej/VOnBgkUaV4P4CJ09wDkszk= -github.com/rancher/kubernetes/staging/src/k8s.io/cri-api 
v1.16.13-k3s1/go.mod h1:cBkf5Pgf0kssF+HGNYRkpkOLu2WYWB5OugNuN1DDCTI= -github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.13-k3s1 h1:kg1Em78BsxBEliMrIfsOj82y4ORKfzL0c2sDhSyEvwg= -github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.13-k3s1/go.mod h1:74gMNDhIex44m9pwyDxvegJ/1iwIV+GeY4vnqW+elB0= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.13-k3s1 h1:hHKZRnXSzpYOCWFqNlc6wLM3hdlBXsJtFIFnl/NVQbk= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.13-k3s1/go.mod h1:SlYQUXX/Hw1T85y62+sOJfqHQmeWD26skTx0NdA8zH8= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.13-k3s1 h1:mt6ATAp4BLp20Iz/9TZcgarpHC+/a8n18rXb/cG4+KM= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.13-k3s1/go.mod h1:FrrkxvHzedrKyAIOTcAJjBonY3PstmNd+OlcwBg+j3Y= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.13-k3s1 h1:0pfkTHBM7P6sTFwyC6dBONXYhB8eJvBljiiEAfKrbRE= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.13-k3s1/go.mod h1:xy5tBvt9vGo0qIyDI+z0lQRj4FBPmDvVTCkB1vnKg4w= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.13-k3s1 h1:1FmfeUjtskwLjRNA8TXMPUpoDzF4bH+6SzYouAUhB3s= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.13-k3s1/go.mod h1:jqwAYW696VyYRvVNjjqC4MYV0N6SiKdfx+nyfPlIwqM= -github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.13-k3s1 h1:Z9wdoU+7bBC+9jdLAm30IFr12MhYElKQ34vgGlePYws= -github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.13-k3s1/go.mod h1:LUej6lcxGM6bqhhyBlQWI+HOHoWYjjphGkZ95geH4zA= -github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.13-k3s1 h1:oZacu2U7hinvNvx4NmOfAHrLcgL2PezosmH4jJ2t0fE= -github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.13-k3s1/go.mod h1:TYwPJHyAg97PV1XzgDrp/il12tZ5cwWHT6tECzCEwG0= -github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.13-k3s1 h1:UTPsoHOQYVcUg+CtdveNuvGxh0+HtteFPrHpZ6XZatg= -github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.13-k3s1/go.mod h1:tUmOWcMug23gITlfkI8tDjgeDdD7xiNR6ylYS0LavV4= -github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.13-k3s1 h1:9nOZ51v0vRehRJbRyUgZIA2OYPLrCPb0PVYoUDZCMB8= -github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.13-k3s1/go.mod h1:pM/0ywERZODloMTAJKiUWRfmKBEhCf8oWgzBm1N5M/o= -github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.13-k3s1/go.mod h1:3PrDeYVltXOLKVrQfe+DdFbJ9HL59vEOq9cob0Twfxo= +github.com/rancher/kubernetes v1.16.14-k3s1 h1:+bDWkebmYqbCUE6vvGQJLPDlO0fYaQXquAJlojtIzZ0= +github.com/rancher/kubernetes v1.16.14-k3s1/go.mod h1:dwLyz+30VDCIk0Ff1PezMKrtHYAso9U9a1PLV4UBPTg= +github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.14-k3s1 h1:YWDZ7dBDB8y1RAN7WejorPb/vxe3o8Hh6i1pYqKvTUY= +github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.14-k3s1/go.mod h1:hF711SYP9H3Bqm/pquHb7I9hmYCbyZmz7AZRaXu1rqE= +github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.14-k3s1 h1:V770+onnMRHfkPu1+VTt6DqvhrAHCwKzICJZnOrfQW4= +github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.14-k3s1/go.mod h1:VBfb4GYcC+wWE9En7Qiiy2GtEYHyu7+OF4va++HcNEA= +github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.14-k3s1 h1:Vez3LNjDR/7EBO7JtNEv26F7RKaOomhwHZwr14Gwk5g= +github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.14-k3s1/go.mod h1:PEApA0VCncLclBrgM37jFnGfgLs/9q2zFMpu3ZMM8aE= 
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.14-k3s1 h1:9A98arULT0QQlmzP9dVfBu9tYP0wCj1qT/6PoKXuxnQ= +github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.14-k3s1/go.mod h1:mTMIwRMmiFs3/tDA7r2FkXUynBBe4WbeDpYPCFfw9UU= +github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.14-k3s1 h1:Sx4Hp1/vFphhg6j7m3VDNzxsPrPoqek+wHJOxW5LmUk= +github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.14-k3s1/go.mod h1:lOJ6TH8ZYtrBR16jzdDXpqfq8ggiwtOycxTAERTxw9w= +github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.14-k3s1 h1:zLimWNNzzWqu3VCFBXsMzOSk5f0TrfhBf2c+rA7/iGU= +github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.14-k3s1/go.mod h1:6qxY6Kj3K9E2F7lU4GMIfOYH3NETeC7c4p2yhJcRLiw= +github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.14-k3s1 h1:P1JX80SQYskwaTdoAwF9mEskxmQtljFvXyFOTVPGfSI= +github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.14-k3s1/go.mod h1:mkOG2NRE3z5O+q3d1mmg3DiltNAqprjQWCJEqS941Sk= +github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.14-k3s1 h1:NugWtq8NNS75E0LC7nBrAj722yrIw8DUCT/YxOssh9Y= +github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.14-k3s1/go.mod h1:VRJM3GFPwH5SheYrgtmNLDThbGZV36jQbE4KLzoI9bg= +github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.14-k3s1 h1:1jLUtT37D0AOMbn5kg1kPcVw0OwWOT/D7l//eQafAjk= +github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.14-k3s1/go.mod h1:uv2Gaa8WhyS9L9Zx7EUK8aan29bm30xfX3KU/Ch440E= +github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.14-k3s1 h1:lHv7Q4lcEHCHhjjZ1t1gB/4NPetjaIh7wlQTuYW1oS0= +github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.14-k3s1/go.mod h1:f4FAu7hsUOeHiLm8TTj5sA9RbgPp4cMYiMD7III70Uc= +github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.14-k3s1 h1:63n/lON9KT45VkUlx+HxTZaigOQTIFyZgO2CZi7f3Sg= +github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.14-k3s1/go.mod h1:cBkf5Pgf0kssF+HGNYRkpkOLu2WYWB5OugNuN1DDCTI= +github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.14-k3s1 h1:jIebpn9BzF6Aa+4vhYdlOCQeOEJe2yJH7D+G3tomags= +github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.14-k3s1/go.mod h1:74gMNDhIex44m9pwyDxvegJ/1iwIV+GeY4vnqW+elB0= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.14-k3s1 h1:rfUYWLsi4uL9F8D0CatuzgRkKvPO2+fKkV3rbhoHKzw= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.14-k3s1/go.mod h1:SlYQUXX/Hw1T85y62+sOJfqHQmeWD26skTx0NdA8zH8= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.14-k3s1 h1:9RXhXVOgarZcXyUp6JwJ4vR5zDOHLYoGlKxPTECuRIU= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.14-k3s1/go.mod h1:FrrkxvHzedrKyAIOTcAJjBonY3PstmNd+OlcwBg+j3Y= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.14-k3s1 h1:9WNVNk0wlcV2ep2CDWdvB2piQ5AEtGv/6zGMkbFaWSc= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.14-k3s1/go.mod h1:xy5tBvt9vGo0qIyDI+z0lQRj4FBPmDvVTCkB1vnKg4w= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.14-k3s1 h1:KCKvUvYzTELCtIQB5P+PfAvnYp53TkoWKnqJWx6e+O0= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.14-k3s1/go.mod h1:jqwAYW696VyYRvVNjjqC4MYV0N6SiKdfx+nyfPlIwqM= +github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.14-k3s1 h1:+tReO4I1Q71ePtQS9uFjJLfCrg/HDhUIrrmGLAEqqa0= 
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.14-k3s1/go.mod h1:l8e9QWtoyfzXGnOmL57pjUnXsTB3yRqn9RLtB2ARK3Q= +github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.14-k3s1 h1:9Tdwj8TkOKiVInofNj+lnMIDJMINTITT0dNO8UN1IJs= +github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.14-k3s1/go.mod h1:TYwPJHyAg97PV1XzgDrp/il12tZ5cwWHT6tECzCEwG0= +github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.14-k3s1 h1:7nTABNRAj4+GTno3w7ePPcKz0ecIL0GYuVg8Emef5ls= +github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.14-k3s1/go.mod h1:tUmOWcMug23gITlfkI8tDjgeDdD7xiNR6ylYS0LavV4= +github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.14-k3s1 h1:bqvzjWV7CaT5fhybGhNx2cRGZqsl9u4I1pHVwIgO0tE= +github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.14-k3s1/go.mod h1:pM/0ywERZODloMTAJKiUWRfmKBEhCf8oWgzBm1N5M/o= +github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.14-k3s1/go.mod h1:3PrDeYVltXOLKVrQfe+DdFbJ9HL59vEOq9cob0Twfxo= github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009 h1:Xsxh7fX3+2wAUJtPy8g2lZh0cYuyifqhBL0vxCIYojs= github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U= github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc= diff --git a/package/Dockerfile b/package/Dockerfile index b849cf759c..16fb626e3f 100644 --- a/package/Dockerfile +++ b/package/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.10 as base +FROM alpine:3.12 as base RUN apk add -U ca-certificates ADD build/out/data.tar.gz /image RUN mkdir -p /image/etc/ssl/certs /image/run /image/var/run /image/tmp /image/lib/modules /image/lib/firmware && \ diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD index 0c4c00d725..24e30d0017 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD @@ -38,7 +38,7 @@ go_genrule( ], cmd = """ $(location //vendor/k8s.io/code-generator/cmd/set-gen) \ - --input-dirs k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/sets/types \ + --input-dirs k8s.io/apimachinery/pkg/util/sets/types \ --output-base $$(dirname $$(dirname $(location :byte.go))) \ --go-header-file $(location //hack/boilerplate:boilerplate.generatego.txt) \ --output-package sets diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD b/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD index 82a940b776..a094fbf275 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD @@ -11,6 +11,7 @@ go_test( srcs = ["healthz_test.go"], embed = [":go_default_library"], deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) @@ -27,7 +28,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library", - "//staging/src/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index 0cacdf6a07..08633f368e 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "net/http" + 
"reflect" "strings" "sync" "sync/atomic" @@ -28,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server/httplog" - "k8s.io/client-go/informers" "k8s.io/klog" ) @@ -81,16 +81,20 @@ func (l *log) Check(_ *http.Request) error { return fmt.Errorf("logging blocked") } +type cacheSyncWaiter interface { + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool +} + type informerSync struct { - sharedInformerFactory informers.SharedInformerFactory + cacheSyncWaiter cacheSyncWaiter } var _ HealthChecker = &informerSync{} -// NewInformerSyncHealthz returns a new HealthChecker that will pass only if all informers in the given sharedInformerFactory sync. -func NewInformerSyncHealthz(sharedInformerFactory informers.SharedInformerFactory) HealthChecker { +// NewInformerSyncHealthz returns a new HealthChecker that will pass only if all informers in the given cacheSyncWaiter sync. +func NewInformerSyncHealthz(cacheSyncWaiter cacheSyncWaiter) HealthChecker { return &informerSync{ - sharedInformerFactory: sharedInformerFactory, + cacheSyncWaiter: cacheSyncWaiter, } } @@ -103,8 +107,8 @@ func (i *informerSync) Check(_ *http.Request) error { // Close stopCh to force checking if informers are synced now. close(stopCh) - var informersByStarted map[bool][]string - for informerType, started := range i.sharedInformerFactory.WaitForCacheSync(stopCh) { + informersByStarted := make(map[bool][]string) + for informerType, started := range i.cacheSyncWaiter.WaitForCacheSync(stopCh) { informersByStarted[started] = append(informersByStarted[started], informerType.String()) } diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index 443a082eeb..41c6f4acdf 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -3,8 +3,8 @@ package version var ( gitMajor = "1" gitMinor = "16" - gitVersion = "v1.16.13-k3s1" - gitCommit = "0b9525c21107dd05b47060ade21a607549116813" + gitVersion = "v1.16.14-k3s1" + gitCommit = "c77d5acdb97508565b5042072d218d48a345eec1" gitTreeState = "clean" - buildDate = "2020-07-16T00:35:22Z" + buildDate = "2020-08-13T18:17:45Z" ) diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index 1851b078a0..35b547701f 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -21,7 +21,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -64,6 +64,7 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hpcloud/tail v1.0.0 
h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -94,6 +95,7 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go index 443a082eeb..41c6f4acdf 100644 --- a/vendor/k8s.io/component-base/version/base.go +++ b/vendor/k8s.io/component-base/version/base.go @@ -3,8 +3,8 @@ package version var ( gitMajor = "1" gitMinor = "16" - gitVersion = "v1.16.13-k3s1" - gitCommit = "0b9525c21107dd05b47060ade21a607549116813" + gitVersion = "v1.16.14-k3s1" + gitCommit = "c77d5acdb97508565b5042072d218d48a345eec1" gitTreeState = "clean" - buildDate = "2020-07-16T00:35:22Z" + buildDate = "2020-08-13T18:17:45Z" ) diff --git a/vendor/k8s.io/csi-translation-lib/go.sum b/vendor/k8s.io/csi-translation-lib/go.sum index 3dc527cf6d..af1e795770 100644 --- a/vendor/k8s.io/csi-translation-lib/go.sum +++ b/vendor/k8s.io/csi-translation-lib/go.sum @@ -21,7 +21,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -56,6 +56,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -83,6 +84,7 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/vendor/k8s.io/kubectl/pkg/version/base.go b/vendor/k8s.io/kubectl/pkg/version/base.go index 443a082eeb..41c6f4acdf 100644 --- a/vendor/k8s.io/kubectl/pkg/version/base.go +++ b/vendor/k8s.io/kubectl/pkg/version/base.go @@ -3,8 +3,8 @@ package version var ( gitMajor = "1" gitMinor = "16" - gitVersion = "v1.16.13-k3s1" - gitCommit = "0b9525c21107dd05b47060ade21a607549116813" + gitVersion = "v1.16.14-k3s1" + gitCommit = "c77d5acdb97508565b5042072d218d48a345eec1" gitTreeState = "clean" - buildDate = "2020-07-16T00:35:22Z" + buildDate = "2020-08-13T18:17:45Z" ) diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go index 36648a9b5b..e2ab4b584d 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go @@ -58,6 +58,10 @@ type CachingDockerConfigProvider struct { Provider DockerConfigProvider Lifetime time.Duration + // ShouldCache is an optional function that returns true if the specific config should be cached. + // If nil, all configs are treated as cacheable. 
+ ShouldCache func(DockerConfig) bool + // cache fields cacheDockerConfig DockerConfig expiration time.Time @@ -96,7 +100,10 @@ func (d *CachingDockerConfigProvider) Provide(image string) DockerConfig { } klog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) - d.cacheDockerConfig = d.Provider.Provide(image) - d.expiration = time.Now().Add(d.Lifetime) - return d.cacheDockerConfig + config := d.Provider.Provide(image) + if d.ShouldCache == nil || d.ShouldCache(config) { + d.cacheDockerConfig = config + d.expiration = time.Now().Add(d.Lifetime) + } + return config } diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/kubernetes/pkg/version/base.go index 443a082eeb..41c6f4acdf 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/base.go +++ b/vendor/k8s.io/kubernetes/pkg/version/base.go @@ -3,8 +3,8 @@ package version var ( gitMajor = "1" gitMinor = "16" - gitVersion = "v1.16.13-k3s1" - gitCommit = "0b9525c21107dd05b47060ade21a607549116813" + gitVersion = "v1.16.14-k3s1" + gitCommit = "c77d5acdb97508565b5042072d218d48a345eec1" gitTreeState = "clean" - buildDate = "2020-07-16T00:35:22Z" + buildDate = "2020-08-13T18:17:45Z" ) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go index 67d368a2ab..7368e32f2f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go @@ -163,7 +163,7 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, newDevicePath := "" - err = wait.Poll(1*time.Second, timeout, func() (bool, error) { + err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) { if newDevicePath, err = findDiskByLun(int(lun), io, exec); err != nil { return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go index cb890c53c7..98c65d5c32 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go @@ -294,7 +294,7 @@ func (b *azureFileMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) e } mountComplete := false - err = wait.Poll(5*time.Second, 10*time.Minute, func() (bool, error) { + err = wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) { err := b.mounter.Mount(source, dir, "cifs", mountOptions) mountComplete = true return true, err diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure.go index 3092889771..ff6dde3ac4 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure.go @@ -173,6 +173,9 @@ type Config struct { // LoadBalancerResourceGroup determines the specific resource group of the load balancer user want to use, working // with LoadBalancerName LoadBalancerResourceGroup string `json:"loadBalancerResourceGroup,omitempty" yaml:"loadBalancerResourceGroup,omitempty"` + + // VmssVirtualMachinesCacheTTLInSeconds sets the cache TTL for vmssVirtualMachines + VmssVirtualMachinesCacheTTLInSeconds int `json:"vmssVirtualMachinesCacheTTLInSeconds,omitempty" yaml:"vmssVirtualMachinesCacheTTLInSeconds,omitempty"` } var _ cloudprovider.Interface = (*Cloud)(nil) diff --git 
a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index 9631f5f78c..e2be5f784a 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -61,6 +61,13 @@ const ( vmssVMInstanceUpdateDelay = 3 * time.Second ) +// nodeIdentity identifies a node within a subscription. +type nodeIdentity struct { + resourceGroup string + vmssName string + nodeName string +} + // scaleSet implements VMSet interface for Azure scale set. type scaleSet struct { *Cloud @@ -70,7 +77,7 @@ type scaleSet struct { availabilitySet VMSet vmssCache *timedCache - vmssVMCache *timedCache + vmssVMCache *sync.Map // [resourcegroup/vmssname]*timedCache availabilitySetNodesCache *timedCache } @@ -80,6 +87,7 @@ func newScaleSet(az *Cloud) (VMSet, error) { ss := &scaleSet{ Cloud: az, availabilitySet: newAvailabilitySet(az), + vmssVMCache: &sync.Map{}, } ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache() @@ -92,11 +100,6 @@ func newScaleSet(az *Cloud) (VMSet, error) { return nil, err } - ss.vmssVMCache, err = ss.newVMSSVirtualMachinesCache() - if err != nil { - return nil, err - } - return ss, nil } @@ -137,12 +140,17 @@ func (ss *scaleSet) getVMSS(vmssName string, crt cacheReadType) (*compute.Virtua return vmss, nil } -// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache. -// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets. -func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { +// getVmssVMByNodeIdentity find virtualMachineScaleSetVM by nodeIdentity, using node's parent VMSS cache. +// Returns cloudprovider.InstanceNotFound if the node does not belong to the scale set named in nodeIdentity. +func (ss *scaleSet) getVmssVMByNodeIdentity(node *nodeIdentity, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { + cacheKey, cache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName) + if err != nil { + return "", "", nil, err + } + getter := func(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, bool, error) { var found bool - cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt) + cached, err := cache.Get(cacheKey, crt) if err != nil { return "", "", nil, found, err } @@ -157,19 +165,19 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, strin return "", "", nil, found, nil } - _, err := getScaleSetVMInstanceID(nodeName) + _, err = getScaleSetVMInstanceID(node.nodeName) if err != nil { return "", "", nil, err } - vmssName, instanceID, vm, found, err := getter(nodeName, crt) + vmssName, instanceID, vm, found, err := getter(node.nodeName, crt) if err != nil { return "", "", nil, err } if !found { - klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName) - vmssName, instanceID, vm, found, err = getter(nodeName, cacheReadTypeForceRefresh) + klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", node.nodeName) + vmssName, instanceID, vm, found, err = getter(node.nodeName, cacheReadTypeForceRefresh) if err != nil { return "", "", nil, err } @@ -185,6 +193,17 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, strin return vmssName, instanceID, vm, nil } +// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache. 
+// Returns cloudprovider.InstanceNotFound if nodeName does not belong to any scale set. +func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { + node, err := ss.getNodeIdentityByNodeName(nodeName, crt) + if err != nil { + return "", "", nil, err + } + + return ss.getVmssVMByNodeIdentity(node, crt) +} + // GetPowerStatusByNodeName returns the power state of the specified node. func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe) @@ -220,8 +239,13 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. // The node must belong to one of scale sets. func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt cacheReadType) (*compute.VirtualMachineScaleSetVM, error) { + cacheKey, cache, err := ss.getVMSSVMCache(resourceGroup, scaleSetName) + if err != nil { + return nil, err + } + getter := func(crt cacheReadType) (vm *compute.VirtualMachineScaleSetVM, found bool, err error) { - cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt) + cached, err := cache.Get(cacheKey, crt) if err != nil { return nil, false, err } @@ -257,6 +281,13 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI if found && vm != nil { return vm, nil } + if found && vm == nil { + klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache if it is expired", scaleSetName, instanceID) + vm, found, err = getter(cacheReadTypeDefault) + if err != nil { + return nil, err + } + } if !found || vm == nil { return nil, cloudprovider.InstanceNotFound } @@ -585,6 +616,66 @@ func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) { return ssNames, nil } +// getNodeIdentityByNodeName use the VMSS cache to find a node's resourcegroup and vmss, returned in a nodeIdentity. 
+func (ss *scaleSet) getNodeIdentityByNodeName(nodeName string, crt cacheReadType) (*nodeIdentity, error) { + getter := func(nodeName string, crt cacheReadType) (*nodeIdentity, error) { + node := &nodeIdentity{ + nodeName: nodeName, + } + + cached, err := ss.vmssCache.Get(vmssKey, crt) + if err != nil { + return nil, err + } + + vmsses := cached.(*sync.Map) + vmsses.Range(func(key, value interface{}) bool { + v := value.(*vmssEntry) + if v.vmss.Name == nil { + return true + } + + vmssPrefix := *v.vmss.Name + if v.vmss.VirtualMachineProfile != nil && + v.vmss.VirtualMachineProfile.OsProfile != nil && + v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix != nil { + vmssPrefix = *v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix + } + + if strings.EqualFold(vmssPrefix, nodeName[:len(nodeName)-6]) { + node.vmssName = *v.vmss.Name + node.resourceGroup = v.resourceGroup + return false + } + + return true + }) + return node, nil + } + + if _, err := getScaleSetVMInstanceID(nodeName); err != nil { + return nil, err + } + + node, err := getter(nodeName, crt) + if err != nil { + return nil, err + } + if node.vmssName != "" { + return node, nil + } + + klog.V(2).Infof("Couldn't find VMSS for node %s, refreshing the cache", nodeName) + node, err = getter(nodeName, cacheReadTypeForceRefresh) + if err != nil { + return nil, err + } + if node.vmssName == "" { + return nil, cloudprovider.InstanceNotFound + } + return node, nil +} + // listScaleSetVMs lists VMs belonging to the specified scale set. func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compute.VirtualMachineScaleSetVM, error) { var err error @@ -967,6 +1058,12 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back if ss.excludeMasterNodesFromStandardLB() && isMasterNode(node) { continue } + + if ss.ShouldNodeExcludedFromLoadBalancer(node) { + klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name) + continue + } + // in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes resourceGroupName, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(node.Spec.ProviderID) if err != nil { diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go index 8c05735beb..210dfee603 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go @@ -20,6 +20,7 @@ package azure import ( "context" + "fmt" "strings" "sync" "time" @@ -36,12 +37,13 @@ var ( vmssCacheSeparator = "#" vmssKey = "k8svmssKey" - vmssVirtualMachinesKey = "k8svmssVirtualMachinesKey" availabilitySetNodesKey = "k8sAvailabilitySetNodesKey" availabilitySetNodesCacheTTL = 15 * time.Minute vmssTTL = 10 * time.Minute vmssVirtualMachinesTTL = 10 * time.Minute + + vmssVirtualMachinesCacheTTLDefaultInSeconds = 600 ) type vmssVirtualMachinesEntry struct { @@ -53,8 +55,9 @@ type vmssVirtualMachinesEntry struct { } type vmssEntry struct { - vmss *compute.VirtualMachineScaleSet - lastUpdate time.Time + vmss *compute.VirtualMachineScaleSet + resourceGroup string + lastUpdate time.Time } func (ss *scaleSet) newVMSSCache() (*timedCache, error) { @@ -80,8 +83,9 @@ func (ss *scaleSet) newVMSSCache() (*timedCache, error) { continue } localCache.Store(*scaleSet.Name, &vmssEntry{ - vmss: &scaleSet, - lastUpdate: time.Now().UTC(), + vmss: &scaleSet, + resourceGroup: resourceGroup, + 
lastUpdate: time.Now().UTC(), }) } } @@ -106,15 +110,58 @@ func extractVmssVMName(name string) (string, string, error) { return ssName, instanceID, nil } -func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) { +// getVMSSVMCache returns an *timedCache and cache key for a VMSS (creating that cache if new). +func (ss *scaleSet) getVMSSVMCache(resourceGroup, vmssName string) (string, *timedCache, error) { + cacheKey := strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName)) + if entry, ok := ss.vmssVMCache.Load(cacheKey); ok { + cache := entry.(*timedCache) + return cacheKey, cache, nil + } + + cache, err := ss.newVMSSVirtualMachinesCache(resourceGroup, vmssName, cacheKey) + if err != nil { + return "", nil, err + } + ss.vmssVMCache.Store(cacheKey, cache) + return cacheKey, cache, nil +} + +// gcVMSSVMCache delete stale VMSS VMs caches from deleted VMSSes. +func (ss *scaleSet) gcVMSSVMCache() error { + cached, err := ss.vmssCache.Get(vmssKey, cacheReadTypeUnsafe) + if err != nil { + return err + } + + vmsses := cached.(*sync.Map) + removed := map[string]bool{} + ss.vmssVMCache.Range(func(key, value interface{}) bool { + cacheKey := key.(string) + vlistIdx := cacheKey[strings.LastIndex(cacheKey, "/")+1:] + if _, ok := vmsses.Load(vlistIdx); !ok { + removed[cacheKey] = true + } + return true + }) + + for key := range removed { + ss.vmssVMCache.Delete(key) + } + + return nil +} + +// newVMSSVirtualMachinesCache instanciates a new VMs cache for VMs belonging to the provided VMSS. +func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*timedCache, error) { getter := func(key string) (interface{}, error) { localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry oldCache := make(map[string]vmssVirtualMachinesEntry) - if ss.vmssVMCache != nil { + if vmssCache, ok := ss.vmssVMCache.Load(cacheKey); ok { // get old cache before refreshing the cache - entry, exists, err := ss.vmssVMCache.store.GetByKey(vmssVirtualMachinesKey) + cache := vmssCache.(*timedCache) + entry, exists, err := cache.store.GetByKey(cacheKey) if err != nil { return nil, err } @@ -130,70 +177,61 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) { } } - allResourceGroups, err := ss.GetResourceGroups() + vms, err := ss.listScaleSetVMs(vmssName, resourceGroupName) if err != nil { return nil, err } - for _, resourceGroup := range allResourceGroups.List() { - scaleSetNames, err := ss.listScaleSets(resourceGroup) - if err != nil { - return nil, err + for i := range vms { + vm := vms[i] + if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil { + klog.Warningf("failed to get computerName for vmssVM (%q)", vmssName) + continue } - for _, ssName := range scaleSetNames { - vms, err := ss.listScaleSetVMs(ssName, resourceGroup) - if err != nil { - return nil, err - } + computerName := strings.ToLower(*vm.OsProfile.ComputerName) + vmssVMCacheEntry := &vmssVirtualMachinesEntry{ + resourceGroup: resourceGroupName, + vmssName: vmssName, + instanceID: to.String(vm.InstanceID), + virtualMachine: &vm, + lastUpdate: time.Now().UTC(), + } + // set cache entry to nil when the VM is under deleting. 
+ if vm.VirtualMachineScaleSetVMProperties != nil && + strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) { + klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName) + vmssVMCacheEntry.virtualMachine = nil + } + localCache.Store(computerName, vmssVMCacheEntry) - for i := range vms { - vm := vms[i] - if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil { - klog.Warningf("failed to get computerName for vmssVM (%q)", ssName) - continue - } + delete(oldCache, computerName) + } - computerName := strings.ToLower(*vm.OsProfile.ComputerName) - localCache.Store(computerName, &vmssVirtualMachinesEntry{ - resourceGroup: resourceGroup, - vmssName: ssName, - instanceID: to.String(vm.InstanceID), - virtualMachine: &vm, - lastUpdate: time.Now().UTC(), - }) - - if _, exists := oldCache[computerName]; exists { - delete(oldCache, computerName) - } - } + // add old missing cache data with nil entries to prevent aggressive + // ARM calls during cache invalidation + for name, vmEntry := range oldCache { + // if the nil cache entry has existed for 15 minutes in the cache + // then it should not be added back to the cache + if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > 15*time.Minute { + klog.V(5).Infof("ignoring expired entries from old cache for %s", name) + continue + } + lastUpdate := time.Now().UTC() + if vmEntry.virtualMachine == nil { + // if this is already a nil entry then keep the time the nil + // entry was first created, so we can cleanup unwanted entries + lastUpdate = vmEntry.lastUpdate } - // add old missing cache data with nil entries to prevent aggressive - // ARM calls during cache invalidation - for name, vmEntry := range oldCache { - // if the nil cache entry has existed for 15 minutes in the cache - // then it should not be added back to the cache - if vmEntry.virtualMachine == nil || time.Since(vmEntry.lastUpdate) > 15*time.Minute { - klog.V(5).Infof("ignoring expired entries from old cache for %s", name) - continue - } - lastUpdate := time.Now().UTC() - if vmEntry.virtualMachine == nil { - // if this is already a nil entry then keep the time the nil - // entry was first created, so we can cleanup unwanted entries - lastUpdate = vmEntry.lastUpdate - } - - klog.V(5).Infof("adding old entries to new cache for %s", name) - localCache.Store(name, &vmssVirtualMachinesEntry{ - resourceGroup: vmEntry.resourceGroup, - vmssName: vmEntry.vmssName, - instanceID: vmEntry.instanceID, - virtualMachine: nil, - lastUpdate: lastUpdate, - }) - } + klog.V(5).Infof("adding old entries to new cache for %s", name) + localCache.Store(name, &vmssVirtualMachinesEntry{ + resourceGroup: vmEntry.resourceGroup, + vmssName: vmEntry.vmssName, + instanceID: vmEntry.instanceID, + virtualMachine: nil, + lastUpdate: lastUpdate, + }) } return localCache, nil @@ -203,14 +241,30 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) { } func (ss *scaleSet) deleteCacheForNode(nodeName string) error { - cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeUnsafe) + node, err := ss.getNodeIdentityByNodeName(nodeName, cacheReadTypeUnsafe) if err != nil { klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err) return err } - virtualMachines := cached.(*sync.Map) + cacheKey, timedcache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName) + if err != nil { + klog.Errorf("deleteCacheForNode(%s) failed with error: %v", 
nodeName, err) + return err + } + + vmcache, err := timedcache.Get(cacheKey, cacheReadTypeUnsafe) + if err != nil { + klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err) + return err + } + virtualMachines := vmcache.(*sync.Map) virtualMachines.Delete(nodeName) + + if err := ss.gcVMSSVMCache(); err != nil { + klog.Errorf("deleteCacheForNode(%s) failed to gc stale vmss caches: %v", nodeName, err) + } + return nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 42a8e63f47..eeed0a1885 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1130,7 +1130,7 @@ gopkg.in/square/go-jose.v2/jwt gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.13-k3s1 +# k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.14-k3s1 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -1172,7 +1172,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.13-k3s1 +# k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.14-k3s1 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install @@ -1220,7 +1220,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi k8s.io/apiextensions-apiserver/pkg/registry/customresource k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition -# k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.13-k3s1 +# k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.14-k3s1 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -1282,7 +1282,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.13-k3s1 +# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.14-k3s1 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/initializer @@ -1395,7 +1395,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.13-k3s1 +# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.14-k3s1 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/kustomize k8s.io/cli-runtime/pkg/kustomize/k8sdeps @@ -1408,7 +1408,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go 
v1.16.13-k3s1 +# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.14-k3s1 k8s.io/client-go/discovery k8s.io/client-go/discovery/cached k8s.io/client-go/discovery/cached/disk @@ -1594,20 +1594,20 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.13-k3s1 +# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.14-k3s1 k8s.io/cloud-provider k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.13-k3s1 +# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.14-k3s1 k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/jws k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.13-k3s1 +# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.14-k3s1 k8s.io/code-generator/cmd/client-gen/args k8s.io/code-generator/cmd/client-gen/generators k8s.io/code-generator/cmd/client-gen/generators/fake @@ -1622,7 +1622,7 @@ k8s.io/code-generator/cmd/lister-gen/args k8s.io/code-generator/cmd/lister-gen/generators k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util -# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.13-k3s1 +# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.14-k3s1 k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag k8s.io/component-base/config @@ -1635,10 +1635,10 @@ k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/metrics/prometheus/restclient k8s.io/component-base/metrics/prometheus/workqueue k8s.io/component-base/version -# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.13-k3s1 +# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.14-k3s1 k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.13-k3s1 +# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.14-k3s1 k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins # k8s.io/gengo v0.0.0-20190822140433-26a664648505 @@ -1653,7 +1653,7 @@ k8s.io/gengo/types k8s.io/heapster/metrics/api/v1/types # k8s.io/klog v1.0.0 k8s.io/klog -# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.13-k3s1 +# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.14-k3s1 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 @@ -1681,7 +1681,7 @@ k8s.io/kube-aggregator/pkg/controllers/status k8s.io/kube-aggregator/pkg/registry/apiservice 
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd k8s.io/kube-aggregator/pkg/registry/apiservice/rest -# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.13-k3s1 +# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.14-k3s1 k8s.io/kube-controller-manager/config/v1alpha1 # k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31 k8s.io/kube-openapi/pkg/aggregator @@ -1692,11 +1692,11 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/proto/validation -# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.13-k3s1 +# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.14-k3s1 k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.13-k3s1 +# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.14-k3s1 k8s.io/kube-scheduler/config/v1alpha1 -# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.13-k3s1 +# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.14-k3s1 k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd k8s.io/kubectl/pkg/cmd/annotate @@ -1773,9 +1773,9 @@ k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation k8s.io/kubectl/pkg/version -# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.13-k3s1 +# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.14-k3s1 k8s.io/kubelet/config/v1beta1 -# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.13-k3s1 +# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.14-k3s1 k8s.io/kubernetes/cmd/cloud-controller-manager/app k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme @@ -2491,7 +2491,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear k8s.io/kubernetes/third_party/forked/gonum/graph/simple k8s.io/kubernetes/third_party/forked/gonum/graph/traverse -# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.13-k3s1 +# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.14-k3s1 k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/azure k8s.io/legacy-cloud-providers/azure/auth @@ -2501,7 +2501,7 @@ k8s.io/legacy-cloud-providers/openstack/util/mount k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.13-k3s1 +# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.14-k3s1 k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics/v1beta1 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
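For context on the healthz change above: NewInformerSyncHealthz now accepts a small cacheSyncWaiter interface (anything with WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool) instead of a concrete informers.SharedInformerFactory, and informersByStarted is now allocated before use. A client-go SharedInformerFactory still satisfies the new interface, so callers are unchanged; a minimal sketch of such a caller (the kubeconfig path and wiring are illustrative, not taken from the patch):

package main

import (
	"time"

	"k8s.io/apiserver/pkg/server/healthz"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a kubeconfig (path is illustrative).
	config, err := clientcmd.BuildConfigFromFlags("", "/etc/rancher/k3s/k3s.yaml")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// SharedInformerFactory satisfies the new cacheSyncWaiter interface because
	// its WaitForCacheSync(stopCh) returns map[reflect.Type]bool.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)

	// The resulting checker reports healthy only once every informer
	// registered with the factory has synced.
	check := healthz.NewInformerSyncHealthz(factory)
	_ = check
}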
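The credentialprovider change adds an optional ShouldCache hook to CachingDockerConfigProvider: Provide still refreshes from the wrapped provider, but the result is only cached (and the expiration extended) when ShouldCache is nil or returns true, so an empty or failed lookup can be retried on the next call. A minimal sketch of wrapping a provider with that hook; the inner provider is left as a nil placeholder rather than a real registry provider:

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/credentialprovider"
)

func main() {
	// innerProvider stands in for whatever DockerConfigProvider is being wrapped
	// (e.g. a cloud registry provider); nil placeholder for the sketch.
	var innerProvider credentialprovider.DockerConfigProvider

	cached := &credentialprovider.CachingDockerConfigProvider{
		Provider: innerProvider,
		Lifetime: 5 * time.Minute,
		// Only cache non-empty results, so a transient failure is retried
		// on the next Provide call instead of being cached for Lifetime.
		ShouldCache: func(cfg credentialprovider.DockerConfig) bool {
			return len(cfg) > 0
		},
	}
	_ = cached
}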
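The azure_dd and azure_file changes swap wait.Poll for wait.PollImmediate (and shorten the azure_file mount retry from 5s/10min to 1s/2min), so the first probe runs immediately instead of after the first interval elapses. A minimal sketch of that difference, with a trivial condition function standing in for the disk/mount probe:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	probe := func() (bool, error) {
		attempts++
		fmt.Println("probe attempt", attempts)
		return attempts >= 2, nil // pretend the device shows up on the 2nd try
	}

	// wait.Poll sleeps one full interval before the first probe;
	// wait.PollImmediate runs the first probe right away, which matters when
	// the condition is often already true (e.g. the disk is already attached).
	if err := wait.PollImmediate(1*time.Second, 10*time.Second, probe); err != nil {
		fmt.Println("timed out:", err)
	}
}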
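The larger Azure change replaces the single vmssVirtualMachines cache with one timedCache per scale set, held in a sync.Map keyed by "<resourcegroup>/<vmssname>" (lowercased), resolved through the node's new nodeIdentity and garbage-collected when a scale set disappears; a vmssVirtualMachinesCacheTTLInSeconds config field is also introduced (the new default constant suggests 600 seconds when unset, though its consumption is outside the hunks shown). A standalone sketch of the keying and GC idea, using placeholder names and a placeholder value type in place of the real timedCache:

package main

import (
	"fmt"
	"strings"
	"sync"
)

// vmssVMCacheKey mirrors the keying scheme the patch uses for the per-VMSS VM
// caches: one cache per scale set, keyed by "<resourcegroup>/<vmssname>"
// (lowercased), so caches for newly seen scale sets can be added lazily.
func vmssVMCacheKey(resourceGroup, vmssName string) string {
	return strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName))
}

func main() {
	var vmssVMCache sync.Map // [resourcegroup/vmssname] -> per-VMSS cache (placeholder value type)

	key := vmssVMCacheKey("my-rg", "k3s-agents-vmss")
	vmssVMCache.Store(key, struct{}{}) // a real timedCache would go here

	// gcVMSSVMCache-style cleanup: drop cache entries whose scale set no longer
	// exists. "known" stands in for the contents of the VMSS cache.
	known := map[string]bool{"k3s-agents-vmss": true}
	vmssVMCache.Range(func(k, _ interface{}) bool {
		cacheKey := k.(string)
		name := cacheKey[strings.LastIndex(cacheKey, "/")+1:]
		if !known[name] {
			vmssVMCache.Delete(cacheKey)
		}
		return true
	})

	fmt.Println("cached key:", key)
}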