Update to Kubernetes v1.18.17 (#3096)

* Update to Kubernetes v1.18.17

Signed-off-by: Jacob Blain Christen <jacob@rancher.com>
pull/3132/head v1.18.17-rc1+k3s1
Jacob Blain Christen 2021-03-18 16:27:18 -07:00, committed by GitHub
parent e6837cdc38, commit be19e07c70
23 changed files with 438 additions and 176 deletions

go.mod

@@ -33,31 +33,31 @@ replace (
 	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-	k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1
+	k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.17-k3s1
-	k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1
+	k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.17-k3s1
-	k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1
+	k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.17-k3s1
-	k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1
+	k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.17-k3s1
-	k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1
+	k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.17-k3s1
-	k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1
+	k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.17-k3s1
-	k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1
+	k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.17-k3s1
-	k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1
+	k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.17-k3s1
-	k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1
+	k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.17-k3s1
-	k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1
+	k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.17-k3s1
-	k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1
+	k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.17-k3s1
-	k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1
+	k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.17-k3s1
-	k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1
+	k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.17-k3s1
-	k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1
+	k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.17-k3s1
-	k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1
+	k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.17-k3s1
-	k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1
+	k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.17-k3s1
-	k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1
+	k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.17-k3s1
-	k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1
+	k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.17-k3s1
-	k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.18.16-k3s1
+	k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.18.17-k3s1
-	k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1
+	k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.17-k3s1
-	k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1
+	k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.17-k3s1
-	k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.18.16-k3s1
+	k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.18.17-k3s1
-	k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.16-k3s1
+	k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.17-k3s1
-	k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.16-k3s1
+	k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.17-k3s1
-	k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.18.16-k3s1
+	k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.18.17-k3s1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 )
@@ -124,5 +124,5 @@ require (
 	k8s.io/component-base v0.0.0
 	k8s.io/cri-api v0.0.0
 	k8s.io/klog v1.0.0
-	k8s.io/kubernetes v1.18.16
+	k8s.io/kubernetes v1.18.17
 )

go.sum

@@ -458,49 +458,49 @@ github.com/k3s-io/cri v1.3.0-k3s.10 h1:K4pIza6Fnv9ucC2DigmTDHeW/v7nBT8cF2M3a1N6u
 github.com/k3s-io/cri v1.3.0-k3s.10/go.mod h1:fGPUUHMKQik/vIegSe05DtX/m4miovdtvVLqRUFAkK0=
 github.com/k3s-io/helm-controller v0.8.3 h1:GWxavyMz7Bw2ClxH5okkeOL8o5U6IBK7uauc44SDCjU=
 github.com/k3s-io/helm-controller v0.8.3/go.mod h1:nZP8FH3KZrNNUf5r+SwwiMR63HS6lxdHdpHijgPfF74=
-github.com/k3s-io/kubernetes v1.18.16-k3s1 h1:VOCBgjlSMQsZhU/Qn3Y0bny3y74tBjovp6m2VclqNbc=
+github.com/k3s-io/kubernetes v1.18.17-k3s1 h1:aEWNG6M9xKO4GiWS4tIVl5MJfwTxeipPUBcHNuTWbjE=
-github.com/k3s-io/kubernetes v1.18.16-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
+github.com/k3s-io/kubernetes v1.18.17-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1 h1:U+tqB0V02DqEl7zblwc0hLJKhmvubnkLGvEh/NVvGRc=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.17-k3s1 h1:IJgRenSrZkkekLBiDgM3hj5QqesNBwzyY7JRqf890W0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.17-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1 h1:iG62cqadChdENW46VQutzG9Oeck5BO7lKsNbudIYwp4=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.17-k3s1 h1:xCrfAIGPVfczZ3uMc/jtFYAkRD3e8ZkZjB4p2zEi7qI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.17-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1 h1:hoIFgutUJrIjo6iOF0MOQXv6zJtD3srbg4Bdn/Gl2qM=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.17-k3s1 h1:o0GYI9I7XcxEcytF1IWrW/Rq4FyDdpn/0f1peJb3dAI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.17-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1 h1:v0oEi/fzkt1mqpoPCsmPKFmW8jGS2zItcojQw6WmDq8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.17-k3s1 h1:TXZ5tan4pqG9xusfDW9pPLCkj/QA3wQDYVyNf9R+YkI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.17-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1 h1:16OlDNNaI/ixfPy94CPtk7bZjICaR9Wl4Eqg4t73x8U=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.17-k3s1 h1:T45a/inPxX+95qdjmGnisr4Ijyvpmx+dSVM7b5COV6Y=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.17-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1 h1:bJc5E87t0lMfd6W+vMw7swO3D8ELIWGmkzNCiKTMJD8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.17-k3s1 h1:UPKTrF4FjqFTrEkd8pvduw0g5yIOHNBD6gHyEy1Xfn8=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.17-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1 h1:eiavvS0HQcpkepHwQE+x7TxjWzNYYHzxXupYFFBodus=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.17-k3s1 h1:OonA8ZKvydfSm2HcYvB6Ne1upiFgq2IDGRX752zaeWY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.17-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1 h1:7fXjYEQAriIqwuQE39xgZnMjzIRmBC6hPm134mT8LsA=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.17-k3s1 h1:/QPpIUjnJ5hX6umchQADWukFbkhSsgip9fEd6p1CQUg=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.17-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1 h1:fmow5tJk9U0l7g1Et5Pzz5vj09P6IEKa/fjAXyL5ZQY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.17-k3s1 h1:fvYov/ZqGon3PykIx1KuX7pC1jhgDgVqkDUhKYFLjCo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.17-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1 h1:TNKQ5XZ11bKFxXomanfTA4DbWz7MqAUPhFF+8T/tqTw=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.17-k3s1 h1:JJpfQis7UR9DnUEmyU0MPEnj8FX4udhmHWZyl+zm8q4=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.17-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1 h1:z0SjsT0qnzA1u2HqNtFeeXZer6TDhiY0ov7caLogwng=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.17-k3s1 h1:ULstuZSeDU2epP3nIy7253QUUplsRmJ6zQ4+v2fgHtg=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.17-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1 h1:9ytp165C2ywtg3pZsdy4fyI2nlWDfVkIlWsHj5Z+tNM=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.17-k3s1 h1:+xnDFPa+ta7eIO/RbB+bqtjX3Jqo55T0Yh83tWxjxLw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.17-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1 h1:3BJ4AzNapPb5XumVEBX53/HAmhnqsuv0GuCKE1sOxEU=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.17-k3s1 h1:+eOE/InruZ0UodXyg7PVi0tghuACzcJdKLF2fEQwqHk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.17-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1 h1:+0T2TH6YJGs/VSsXiPfU7ew1BJNSvrFRAw+H66jpMuU=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.17-k3s1 h1:hZCDLua0UzVUqJ71Mt+g86DbvUgh4EghDadtA3vmHTQ=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.17-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1 h1:kvIfXmOsTwglK9EW9nhgIThQJZ/K2o75v3XmSlc19cM=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.17-k3s1 h1:VZz4vgC75pV2fTesjEOFxzoFSRrFiCjM0iUN7bWagLo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.17-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1 h1:fpXQAwgsxQtVzEYXcJJ+I0y2XfmPZTND5+R9jOlL0Ag=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.17-k3s1 h1:hWI6zvGtQBu3K5FrTt/jsaere1eWsmy+yxezM1gl+E4=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.17-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1 h1:ZNU6UhplF3o9bQgFW/kAxgzIYuQyJc5sy8gd7WfuCx0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.17-k3s1 h1:gPfueOTl2jnOydDE3E6Q2cGmNLqPamxbeMZTlL5S+tQ=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.17-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1 h1:mEcew1xAHJjG7wGlXE88R5kz9NcIPNZI361WhPDHDIY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.17-k3s1 h1:vY7kj5eiBF2oYg17Ctc4F0h7gjElpyqNrEYKFGUjlSk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.17-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1 h1:9siB+L5MHHZs/IbN9NV43WYkvr9kLMjeFIEErfigf+w=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.17-k3s1 h1:46yHQwIYfcxmgBwQ9nfmZ03hnPAn1lPIS8uyN9P2Br4=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.17-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1 h1:7RC1767UxORogidkHORJTBi8pOjTWx6kD6wAi14oaj0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.17-k3s1 h1:3MSKcheADkTCFwhtfgCBpjF11SG0ScB3Si68tjeDcGE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.17-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.16-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.17-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
 github.com/karrick/godirwalk v1.7.5 h1:VbzFqwXwNbAZoA6W5odrLr+hKK197CcENcPh6E/gJ0M=
 github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=

--- next file ---

@@ -96,9 +96,11 @@ func SerializeObject(mediaType string, encoder runtime.Encoder, hw http.Response
 	err := encoder.Encode(object, w)
 	if err == nil {
 		err = w.Close()
-		if err == nil {
-			return
+		if err != nil {
+			// we cannot write an error to the writer anymore as the Encode call was successful.
+			utilruntime.HandleError(fmt.Errorf("apiserver was unable to close cleanly the response writer: %v", err))
 		}
+		return
 	}

 	// make a best effort to write the object if a failure is detected
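Review note: the inverted check means that once Encode has succeeded, the body (and a success status) has already reached the client, so a failure from Close can only be logged out of band. A minimal, self-contained Go sketch of the same control flow, with a hypothetical encodeAndClose helper standing in for SerializeObject:

package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

// encodeAndClose mirrors the patched flow: when the write succeeds, a Close
// error can no longer be reported on the stream itself, so it is only logged;
// when the write fails, the error is returned for best-effort handling.
func encodeAndClose(w io.WriteCloser, payload string) error {
	_, err := fmt.Fprintln(w, payload)
	if err == nil {
		if cerr := w.Close(); cerr != nil {
			// The payload already went out; all we can do is record the failure.
			log.Printf("unable to close response writer cleanly: %v", cerr)
		}
		return nil
	}
	return err // caller may still attempt to write an error response
}

func main() {
	if err := encodeAndClose(os.Stdout, "hello"); err != nil {
		log.Fatal(err)
	}
}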

--- next file ---

@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion   = "v1.18.16-k3s1"
-	gitCommit    = "97eee15c8b5c26daccd339bb0ed95a777e1c85a6"
+	gitVersion   = "v1.18.17-k3s1"
+	gitCommit    = "5e921367c056a418acd96c753ea0c526c46c7433"
 	gitTreeState = "clean"
-	buildDate = "2021-02-18T20:13:50Z"
+	buildDate = "2021-03-18T21:25:26Z"
 )

--- next file ---

@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion   = "v1.18.16-k3s1"
-	gitCommit    = "97eee15c8b5c26daccd339bb0ed95a777e1c85a6"
+	gitVersion   = "v1.18.17-k3s1"
+	gitCommit    = "5e921367c056a418acd96c753ea0c526c46c7433"
 	gitTreeState = "clean"
-	buildDate = "2021-02-18T20:13:50Z"
+	buildDate = "2021-03-18T21:25:26Z"
 )

--- next file ---

@@ -24,6 +24,7 @@ import (
 	"io"
 	"regexp"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/spf13/cobra"
@@ -337,12 +338,14 @@ func (o LogsOptions) parallelConsumeRequest(requests map[corev1.ObjectReference]
 	reader, writer := io.Pipe()
 	wg := &sync.WaitGroup{}
 	wg.Add(len(requests))
+	closedWithError := int32(0)
 	for objRef, request := range requests {
 		go func(objRef corev1.ObjectReference, request rest.ResponseWrapper) {
 			defer wg.Done()
 			out := o.addPrefixIfNeeded(objRef, writer)
 			if err := o.ConsumeRequestFn(request, out); err != nil {
 				if !o.IgnoreLogErrors {
+					atomic.StoreInt32(&closedWithError, 1)
 					writer.CloseWithError(err)
 					// It's important to return here to propagate the error via the pipe
@@ -357,7 +360,9 @@ func (o LogsOptions) parallelConsumeRequest(requests map[corev1.ObjectReference]
 	go func() {
 		wg.Wait()
+		if atomic.LoadInt32(&closedWithError) == 0 {
 			writer.Close()
+		}
 	}()

 	_, err := io.Copy(o.Out, reader)
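Review note: the race being fixed is that consumer goroutines may close the shared pipe with an error while a final goroutine closes it cleanly after all consumers finish; the atomic flag keeps the clean Close from masking an error close. A self-contained sketch of the pattern under that assumption (error-free sources, so the structure is the point, not the error path):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
	"sync"
	"sync/atomic"
)

func main() {
	sources := []io.Reader{
		strings.NewReader("pod-a: log line\n"),
		strings.NewReader("pod-b: log line\n"),
	}

	reader, writer := io.Pipe()
	closedWithError := int32(0)
	var wg sync.WaitGroup
	wg.Add(len(sources))

	for _, src := range sources {
		go func(src io.Reader) {
			defer wg.Done()
			if _, err := io.Copy(writer, src); err != nil {
				// Record that the pipe was closed with an error so the
				// cleanup goroutine below does not also issue a clean Close.
				atomic.StoreInt32(&closedWithError, 1)
				writer.CloseWithError(err)
			}
		}(src)
	}

	go func() {
		wg.Wait()
		if atomic.LoadInt32(&closedWithError) == 0 {
			writer.Close()
		}
	}()

	if _, err := io.Copy(os.Stdout, reader); err != nil {
		fmt.Fprintln(os.Stderr, "consume error:", err)
	}
}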

--- next file ---

@@ -85,11 +85,23 @@ func VisitContainers(podSpec *api.PodSpec, mask ContainerType, visitor Container
 // Visitor is called with each object name, and returns true if visiting should continue
 type Visitor func(name string) (shouldContinue bool)

+func skipEmptyNames(visitor Visitor) Visitor {
+	return func(name string) bool {
+		if len(name) == 0 {
+			// continue visiting
+			return true
+		}
+		// delegate to visitor
+		return visitor(name)
+	}
+}
+
 // VisitPodSecretNames invokes the visitor function with the name of every secret
 // referenced by the pod spec. If visitor returns false, visiting is short-circuited.
 // Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
 // Returns true if visiting completed, false if visiting was short-circuited.
 func VisitPodSecretNames(pod *api.Pod, visitor Visitor) bool {
+	visitor = skipEmptyNames(visitor)
 	for _, reference := range pod.Spec.ImagePullSecrets {
 		if !visitor(reference.Name) {
 			return false
@@ -178,6 +190,7 @@ func visitContainerSecretNames(container *api.Container, visitor Visitor) bool {
 // Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
 // Returns true if visiting completed, false if visiting was short-circuited.
 func VisitPodConfigmapNames(pod *api.Pod, visitor Visitor) bool {
+	visitor = skipEmptyNames(visitor)
 	VisitContainers(&pod.Spec, AllContainers, func(c *api.Container, containerType ContainerType) bool {
 		return visitContainerConfigmapNames(c, visitor)
 	})
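Review note: the same skipEmptyNames wrapper is added in three places in this commit (the core pod helpers above, and the PersistentVolume and v1 pod helpers that follow), so empty secret/configmap names never reach the caller's visitor. A tiny runnable illustration of the decorator, using the one-argument Visitor shape from this hunk:

package main

import "fmt"

// Visitor mirrors the signature used above: return false to stop visiting.
type Visitor func(name string) bool

// skipEmptyNames treats empty names as "keep going" and never delegates them.
func skipEmptyNames(visitor Visitor) Visitor {
	return func(name string) bool {
		if len(name) == 0 {
			return true // continue visiting without delegating
		}
		return visitor(name)
	}
}

func main() {
	seen := []string{}
	v := skipEmptyNames(func(name string) bool {
		seen = append(seen, name)
		return true
	})
	for _, name := range []string{"registry-creds", "", "tls-secret"} {
		v(name)
	}
	fmt.Println(seen) // [registry-creds tls-secret]
}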

--- next file ---

@@ -30,10 +30,22 @@ func getClaimRefNamespace(pv *corev1.PersistentVolume) string {
 // Visitor is called with each object's namespace and name, and returns true if visiting should continue
 type Visitor func(namespace, name string, kubeletVisible bool) (shouldContinue bool)

+func skipEmptyNames(visitor Visitor) Visitor {
+	return func(namespace, name string, kubeletVisible bool) bool {
+		if len(name) == 0 {
+			// continue visiting
+			return true
+		}
+		// delegate to visitor
+		return visitor(namespace, name, kubeletVisible)
+	}
+}
+
 // VisitPVSecretNames invokes the visitor function with the name of every secret
 // referenced by the PV spec. If visitor returns false, visiting is short-circuited.
 // Returns true if visiting completed, false if visiting was short-circuited.
 func VisitPVSecretNames(pv *corev1.PersistentVolume, visitor Visitor) bool {
+	visitor = skipEmptyNames(visitor)
 	source := &pv.Spec.PersistentVolumeSource
 	switch {
 	case source.AzureFile != nil:

--- next file ---

@@ -82,11 +82,23 @@ func VisitContainers(podSpec *v1.PodSpec, visitor ContainerVisitor) bool {
 // Visitor is called with each object name, and returns true if visiting should continue
 type Visitor func(name string) (shouldContinue bool)

+func skipEmptyNames(visitor Visitor) Visitor {
+	return func(name string) bool {
+		if len(name) == 0 {
+			// continue visiting
+			return true
+		}
+		// delegate to visitor
+		return visitor(name)
+	}
+}
+
 // VisitPodSecretNames invokes the visitor function with the name of every secret
 // referenced by the pod spec. If visitor returns false, visiting is short-circuited.
 // Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
 // Returns true if visiting completed, false if visiting was short-circuited.
 func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool {
+	visitor = skipEmptyNames(visitor)
 	for _, reference := range pod.Spec.ImagePullSecrets {
 		if !visitor(reference.Name) {
 			return false
@@ -176,6 +188,7 @@ func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool {
 // Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
 // Returns true if visiting completed, false if visiting was short-circuited.
 func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool {
+	visitor = skipEmptyNames(visitor)
 	VisitContainers(&pod.Spec, func(c *v1.Container) bool {
 		return visitContainerConfigmapNames(c, visitor)
 	})

--- next file ---

@@ -6,6 +6,7 @@ go_library(
         "endpointset.go",
         "endpointslice_controller.go",
         "endpointslice_tracker.go",
+        "errors.go",
         "reconciler.go",
         "utils.go",
     ],
@@ -64,6 +65,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",

endpointslice_controller.go

@@ -321,6 +321,10 @@ func (c *Controller) syncService(key string) error {
 		return err
 	}

+	if c.endpointSliceTracker.StaleSlices(service, endpointSlices) {
+		return &StaleInformerCache{"EndpointSlice informer cache is out of date"}
+	}
+
 	// We call ComputeEndpointLastChangeTriggerTime here to make sure that the
 	// state of the trigger time tracker gets updated even if the sync turns out
 	// to be no-op and we don't update the EndpointSlice objects.
@@ -370,7 +374,7 @@ func (c *Controller) onEndpointSliceAdd(obj interface{}) {
 		utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceAdd()"))
 		return
 	}
-	if managedByController(endpointSlice) && c.endpointSliceTracker.Stale(endpointSlice) {
+	if managedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice) {
 		c.queueServiceForEndpointSlice(endpointSlice)
 	}
 }
@@ -386,7 +390,18 @@ func (c *Controller) onEndpointSliceUpdate(prevObj, obj interface{}) {
 		utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceUpdate()"))
 		return
 	}
-	if managedByChanged(prevEndpointSlice, endpointSlice) || (managedByController(endpointSlice) && c.endpointSliceTracker.Stale(endpointSlice)) {
+	// EndpointSlice generation does not change when labels change. Although the
+	// controller will never change LabelServiceName, users might. This check
+	// ensures that we handle changes to this label.
+	svcName := endpointSlice.Labels[discovery.LabelServiceName]
+	prevSvcName := prevEndpointSlice.Labels[discovery.LabelServiceName]
+	if svcName != prevSvcName {
+		klog.Warningf("%s label changed from %s to %s for %s", discovery.LabelServiceName, prevSvcName, svcName, endpointSlice.Name)
+		c.queueServiceForEndpointSlice(endpointSlice)
+		c.queueServiceForEndpointSlice(prevEndpointSlice)
+		return
+	}
+	if managedByChanged(prevEndpointSlice, endpointSlice) || (managedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice)) {
 		c.queueServiceForEndpointSlice(endpointSlice)
 	}
 }
@@ -397,8 +412,12 @@ func (c *Controller) onEndpointSliceUpdate(prevObj, obj interface{}) {
 func (c *Controller) onEndpointSliceDelete(obj interface{}) {
 	endpointSlice := getEndpointSliceFromDeleteAction(obj)
 	if endpointSlice != nil && managedByController(endpointSlice) && c.endpointSliceTracker.Has(endpointSlice) {
+		// This returns false if we didn't expect the EndpointSlice to be
+		// deleted. If that is the case, we queue the Service for another sync.
+		if !c.endpointSliceTracker.HandleDeletion(endpointSlice) {
 			c.queueServiceForEndpointSlice(endpointSlice)
 		}
+	}
 }

 // queueServiceForEndpointSlice attempts to queue the corresponding Service for

endpointslice_tracker.go

@@ -19,87 +19,154 @@ package endpointslice

 import (
 	"sync"

+	"k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1beta1"
 	"k8s.io/apimachinery/pkg/types"
 )

-// endpointSliceResourceVersions tracks expected EndpointSlice resource versions
-// by EndpointSlice name.
-type endpointSliceResourceVersions map[string]string
+const (
+	deletionExpected = -1
+)

-// endpointSliceTracker tracks EndpointSlices and their associated resource
-// versions to help determine if a change to an EndpointSlice has been processed
-// by the EndpointSlice controller.
+// generationsBySlice tracks expected EndpointSlice generations by EndpointSlice
+// uid. A value of deletionExpected (-1) may be used here to indicate that we
+// expect this EndpointSlice to be deleted.
+type generationsBySlice map[types.UID]int64
+
+// endpointSliceTracker tracks EndpointSlices and their associated generation to
+// help determine if a change to an EndpointSlice has been processed by the
+// EndpointSlice controller.
 type endpointSliceTracker struct {
-	// lock protects resourceVersionsByService.
+	// lock protects generationsByService.
 	lock sync.Mutex
-	// resourceVersionsByService tracks the list of EndpointSlices and
-	// associated resource versions expected for a given Service.
-	resourceVersionsByService map[types.NamespacedName]endpointSliceResourceVersions
+	// generationsByService tracks the generations of EndpointSlices for each
+	// Service.
+	generationsByService map[types.NamespacedName]generationsBySlice
 }

 // newEndpointSliceTracker creates and initializes a new endpointSliceTracker.
 func newEndpointSliceTracker() *endpointSliceTracker {
 	return &endpointSliceTracker{
-		resourceVersionsByService: map[types.NamespacedName]endpointSliceResourceVersions{},
+		generationsByService: map[types.NamespacedName]generationsBySlice{},
 	}
 }

-// Has returns true if the endpointSliceTracker has a resource version for the
+// Has returns true if the endpointSliceTracker has a generation for the
 // provided EndpointSlice.
 func (est *endpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) bool {
 	est.lock.Lock()
 	defer est.lock.Unlock()

-	rrv := est.relatedResourceVersions(endpointSlice)
-	_, ok := rrv[endpointSlice.Name]
+	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
+	if !ok {
+		return false
+	}
+	_, ok = gfs[endpointSlice.UID]
 	return ok
 }

-// Stale returns true if this endpointSliceTracker does not have a resource
-// version for the provided EndpointSlice or it does not match the resource
-// version of the provided EndpointSlice.
-func (est *endpointSliceTracker) Stale(endpointSlice *discovery.EndpointSlice) bool {
+// ShouldSync returns true if this endpointSliceTracker does not have a
+// generation for the provided EndpointSlice or it is greater than the
+// generation of the tracked EndpointSlice.
+func (est *endpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSlice) bool {
 	est.lock.Lock()
 	defer est.lock.Unlock()

-	rrv := est.relatedResourceVersions(endpointSlice)
-	return rrv[endpointSlice.Name] != endpointSlice.ResourceVersion
+	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
+	if !ok {
+		return true
+	}
+	g, ok := gfs[endpointSlice.UID]
+	return !ok || endpointSlice.Generation > g
 }

-// Update adds or updates the resource version in this endpointSliceTracker for
-// the provided EndpointSlice.
+// StaleSlices returns true if one or more of the provided EndpointSlices
+// have older generations than the corresponding tracked ones or if the tracker
+// is expecting one or more of the provided EndpointSlices to be deleted.
+func (est *endpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool {
+	est.lock.Lock()
+	defer est.lock.Unlock()
+
+	nn := types.NamespacedName{Name: service.Name, Namespace: service.Namespace}
+	gfs, ok := est.generationsByService[nn]
+	if !ok {
+		return false
+	}
+	for _, endpointSlice := range endpointSlices {
+		g, ok := gfs[endpointSlice.UID]
+		if ok && (g == deletionExpected || g > endpointSlice.Generation) {
+			return true
+		}
+	}
+	return false
+}
+
+// Update adds or updates the generation in this endpointSliceTracker for the
+// provided EndpointSlice.
 func (est *endpointSliceTracker) Update(endpointSlice *discovery.EndpointSlice) {
 	est.lock.Lock()
 	defer est.lock.Unlock()

-	rrv := est.relatedResourceVersions(endpointSlice)
-	rrv[endpointSlice.Name] = endpointSlice.ResourceVersion
+	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
+	if !ok {
+		gfs = generationsBySlice{}
+		est.generationsByService[getServiceNN(endpointSlice)] = gfs
+	}
+	gfs[endpointSlice.UID] = endpointSlice.Generation
 }

-// Delete removes the resource version in this endpointSliceTracker for the
-// provided EndpointSlice.
-func (est *endpointSliceTracker) Delete(endpointSlice *discovery.EndpointSlice) {
+// DeleteService removes the set of generations tracked for the Service.
+func (est *endpointSliceTracker) DeleteService(namespace, name string) {
 	est.lock.Lock()
 	defer est.lock.Unlock()

-	rrv := est.relatedResourceVersions(endpointSlice)
-	delete(rrv, endpointSlice.Name)
+	serviceNN := types.NamespacedName{Name: name, Namespace: namespace}
+	delete(est.generationsByService, serviceNN)
 }

-// relatedResourceVersions returns the set of resource versions tracked for the
-// Service corresponding to the provided EndpointSlice. If no resource versions
-// are currently tracked for this service, an empty set is initialized.
-func (est *endpointSliceTracker) relatedResourceVersions(endpointSlice *discovery.EndpointSlice) endpointSliceResourceVersions {
-	serviceNN := getServiceNN(endpointSlice)
-	vers, ok := est.resourceVersionsByService[serviceNN]
+// ExpectDeletion sets the generation to deletionExpected in this
+// endpointSliceTracker for the provided EndpointSlice.
+func (est *endpointSliceTracker) ExpectDeletion(endpointSlice *discovery.EndpointSlice) {
+	est.lock.Lock()
+	defer est.lock.Unlock()
+
+	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
 	if !ok {
-		vers = endpointSliceResourceVersions{}
-		est.resourceVersionsByService[serviceNN] = vers
+		gfs = generationsBySlice{}
+		est.generationsByService[getServiceNN(endpointSlice)] = gfs
 	}
-	return vers
+	gfs[endpointSlice.UID] = deletionExpected
+}
+
+// HandleDeletion removes the generation in this endpointSliceTracker for the
+// provided EndpointSlice. This returns true if the tracker expected this
+// EndpointSlice to be deleted and false if not.
+func (est *endpointSliceTracker) HandleDeletion(endpointSlice *discovery.EndpointSlice) bool {
+	est.lock.Lock()
+	defer est.lock.Unlock()
+
+	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
+	if ok {
+		g, ok := gfs[endpointSlice.UID]
+		delete(gfs, endpointSlice.UID)
+		if ok && g != deletionExpected {
+			return false
+		}
+	}
+	return true
+}
+
+// generationsForSliceUnsafe returns the generations for the Service
+// corresponding to the provided EndpointSlice, and a bool to indicate if it
+// exists. A lock must be applied before calling this function.
+func (est *endpointSliceTracker) generationsForSliceUnsafe(endpointSlice *discovery.EndpointSlice) (generationsBySlice, bool) {
+	serviceNN := getServiceNN(endpointSlice)
+	generations, ok := est.generationsByService[serviceNN]
+	return generations, ok
 }

 // getServiceNN returns a namespaced name for the Service corresponding to the
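Review note: the rewrite swaps resourceVersion string comparison for metadata.generation comparison, which is monotonic and unaffected by label-only or status writes, and adds an explicit deletionExpected marker. A stripped-down, single-Service model of the new semantics (plain strings instead of types.UID; not the controller's actual code):

package main

import "fmt"

const deletionExpected int64 = -1

// tracker stands in for one Service's generationsBySlice map.
type tracker map[string]int64

// shouldSync mirrors ShouldSync: unseen slices, or slices newer than what
// the controller last wrote, need a sync.
func (t tracker) shouldSync(uid string, gen int64) bool {
	g, ok := t[uid]
	return !ok || gen > g
}

// staleSlices mirrors StaleSlices: the informer cache is stale if it shows
// an older generation than expected, or a slice we expect to be deleted.
func (t tracker) staleSlices(observed map[string]int64) bool {
	for uid, gen := range observed {
		if g, ok := t[uid]; ok && (g == deletionExpected || g > gen) {
			return true
		}
	}
	return false
}

func main() {
	t := tracker{}

	t["slice-a"] = 3                        // Update(): controller wrote generation 3
	fmt.Println(t.shouldSync("slice-a", 3)) // false: informer has caught up
	fmt.Println(t.shouldSync("slice-a", 4)) // true: someone else bumped the slice

	t["slice-a"] = deletionExpected // ExpectDeletion(): controller issued a delete
	fmt.Println(t.staleSlices(map[string]int64{"slice-a": 3})) // true: cache still shows the slice
}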

errors.go

@@ -0,0 +1,30 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package endpointslice
+
+// StaleInformerCache errors indicate that the informer cache includes out of
+// date resources.
+type StaleInformerCache struct {
+	msg string
+}
+
+func (e *StaleInformerCache) Error() string { return e.msg }
+
+func isStaleInformerCacheErr(err error) bool {
+	_, ok := err.(*StaleInformerCache)
+	return ok
+}
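Review note: modeling staleness as a dedicated error type lets callers of syncService distinguish "retry, the informer will catch up with our own writes" from genuine failures. A self-contained sketch of the check; it uses errors.As, which also matches wrapped errors, whereas the new file uses a plain type assertion:

package main

import (
	"errors"
	"fmt"
)

// StaleInformerCache mirrors the error type added above.
type StaleInformerCache struct{ msg string }

func (e *StaleInformerCache) Error() string { return e.msg }

func isStaleInformerCacheErr(err error) bool {
	var s *StaleInformerCache
	return errors.As(err, &s)
}

func main() {
	err := fmt.Errorf("sync failed: %w", &StaleInformerCache{"cache out of date"})
	if isStaleInformerCacheErr(err) {
		fmt.Println("requeue and retry; the informer cache will catch up")
	}
}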

reconciler.go

@@ -237,7 +237,7 @@ func (r *reconciler) finalize(
 		if err != nil {
 			return fmt.Errorf("failed to delete %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
 		}
-		r.endpointSliceTracker.Delete(endpointSlice)
+		r.endpointSliceTracker.ExpectDeletion(endpointSlice)
 		metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
 	}

--- next file ---

@@ -872,7 +872,7 @@ func (nc *Controller) processTaintBaseEviction(node *v1.Node, observedReadyCondi
 			if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) {
 				klog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
 			}
-		} else if nc.markNodeForTainting(node) {
+		} else if nc.markNodeForTainting(node, v1.ConditionFalse) {
 			klog.V(2).Infof("Node %v is NotReady as of %v. Adding it to the Taint queue.",
 				node.Name,
 				decisionTimestamp,
@@ -885,7 +885,7 @@ func (nc *Controller) processTaintBaseEviction(node *v1.Node, observedReadyCondi
 			if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) {
 				klog.Errorf("Failed to instantly swap NotReadyTaint to UnreachableTaint. Will try again in the next cycle.")
 			}
-		} else if nc.markNodeForTainting(node) {
+		} else if nc.markNodeForTainting(node, v1.ConditionUnknown) {
 			klog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.",
 				node.Name,
 				decisionTimestamp,
@@ -1476,9 +1476,21 @@ func (nc *Controller) evictPods(node *v1.Node, pods []*v1.Pod) (bool, error) {
 	return nc.zonePodEvictor[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID)), nil
 }

-func (nc *Controller) markNodeForTainting(node *v1.Node) bool {
+func (nc *Controller) markNodeForTainting(node *v1.Node, status v1.ConditionStatus) bool {
 	nc.evictorLock.Lock()
 	defer nc.evictorLock.Unlock()
+	if status == v1.ConditionFalse {
+		if !taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
+			nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name)
+		}
+	}
+
+	if status == v1.ConditionUnknown {
+		if !taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
+			nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name)
+		}
+	}
+
 	return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID))
 }
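Review note: the extra Remove calls handle a node whose ready condition flips (NotReady <-> unreachable) before the matching taint was actually applied; dropping the stale queue entry lets Add re-queue the node with a fresh timestamp instead of being ignored as a duplicate. A toy stand-in for the timed queue showing why Remove-then-Add matters (the real zoneNoExecuteTainter is a rate-limited timed queue, not shown here):

package main

import (
	"fmt"
	"time"
)

// timedQueue models only the property the fix relies on: Add is a no-op for
// an already-queued name, so a stale entry must be removed first to reset it.
type timedQueue map[string]time.Time

func (q timedQueue) Add(name string) bool {
	if _, ok := q[name]; ok {
		return false // already queued; timestamp unchanged
	}
	q[name] = time.Now()
	return true
}

func (q timedQueue) Remove(name string) { delete(q, name) }

func main() {
	q := timedQueue{}
	q.Add("node-1")              // queued when the node went NotReady
	q.Remove("node-1")           // condition flipped before the taint was applied
	fmt.Println(q.Add("node-1")) // true: re-queued with a fresh timestamp
}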

--- next file ---

@@ -346,17 +346,46 @@ func (kl *Kubelet) getMountedVolumePathListFromDisk(podUID types.UID) ([]string,
 	return mountedVolumes, nil
 }

-// podVolumeSubpathsDirExists returns true if the pod volume-subpaths directory for
-// a given pod exists
-func (kl *Kubelet) podVolumeSubpathsDirExists(podUID types.UID) (bool, error) {
-	podVolDir := kl.getPodVolumeSubpathsDir(podUID)
+// getPodVolumeSubpathListFromDisk returns a list of the volume-subpath paths by reading the
+// subpath directories for the given pod from the disk.
+func (kl *Kubelet) getPodVolumeSubpathListFromDisk(podUID types.UID) ([]string, error) {
+	volumes := []string{}
+	podSubpathsDir := kl.getPodVolumeSubpathsDir(podUID)

-	if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil {
-		return true, fmt.Errorf("error checking if path %q exists: %v", podVolDir, pathErr)
+	if pathExists, pathErr := mount.PathExists(podSubpathsDir); pathErr != nil {
+		return nil, fmt.Errorf("error checking if path %q exists: %v", podSubpathsDir, pathErr)
 	} else if !pathExists {
-		return false, nil
+		return volumes, nil
 	}
-	return true, nil
+
+	// Explicitly walks /<volume>/<container name>/<subPathIndex>
+	volumePluginDirs, err := ioutil.ReadDir(podSubpathsDir)
+	if err != nil {
+		klog.Errorf("Could not read directory %s: %v", podSubpathsDir, err)
+		return volumes, err
+	}
+	for _, volumePluginDir := range volumePluginDirs {
+		volumePluginName := volumePluginDir.Name()
+		volumePluginPath := filepath.Join(podSubpathsDir, volumePluginName)
+		containerDirs, err := ioutil.ReadDir(volumePluginPath)
+		if err != nil {
+			return volumes, fmt.Errorf("could not read directory %s: %v", volumePluginPath, err)
+		}
+		for _, containerDir := range containerDirs {
+			containerName := containerDir.Name()
+			containerPath := filepath.Join(volumePluginPath, containerName)
+			// Switch to ReadDirNoStat at the subPathIndex level to prevent issues with stat'ing
+			// mount points that may not be responsive
+			subPaths, err := utilpath.ReadDirNoStat(containerPath)
+			if err != nil {
+				return volumes, fmt.Errorf("could not read directory %s: %v", containerPath, err)
+			}
+			for _, subPathDir := range subPaths {
+				volumes = append(volumes, filepath.Join(containerPath, subPathDir))
+			}
+		}
+	}
+	return volumes, nil
 }

 // GetVersionInfo returns information about the version of cAdvisor in use.

--- next file ---

@@ -18,6 +18,7 @@ package kubelet

 import (
 	"fmt"
+	"syscall"

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -120,25 +121,49 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
 			klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid)
 			continue
 		}
-		// If there are still volume directories, do not delete directory
+
+		allVolumesCleanedUp := true
+
+		// If there are still volume directories, attempt to rmdir them
 		volumePaths, err := kl.getPodVolumePathListFromDisk(uid)
 		if err != nil {
 			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err))
 			continue
 		}
 		if len(volumePaths) > 0 {
-			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but volume paths are still present on disk", uid))
-			continue
+			for _, volumePath := range volumePaths {
+				if err := syscall.Rmdir(volumePath); err != nil {
+					orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() volume at path %v: %v", uid, volumePath, err))
+					allVolumesCleanedUp = false
+				} else {
+					klog.Warningf("Cleaned up orphaned volume from pod %q at %s", uid, volumePath)
+				}
+			}
 		}

-		// If there are any volume-subpaths, do not cleanup directories
-		volumeSubpathExists, err := kl.podVolumeSubpathsDirExists(uid)
+		// If there are any volume-subpaths, attempt to rmdir them
+		subpathVolumePaths, err := kl.getPodVolumeSubpathListFromDisk(uid)
 		if err != nil {
 			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err))
 			continue
 		}
-		if volumeSubpathExists {
-			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but volume subpaths are still present on disk", uid))
+		if len(subpathVolumePaths) > 0 {
+			for _, subpathVolumePath := range subpathVolumePaths {
+				if err := syscall.Rmdir(subpathVolumePath); err != nil {
+					orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() subpath at path %v: %v", uid, subpathVolumePath, err))
+					allVolumesCleanedUp = false
+				} else {
+					klog.Warningf("Cleaned up orphaned volume subpath from pod %q at %s", uid, subpathVolumePath)
+				}
+			}
+		}
+
+		if !allVolumesCleanedUp {
+			// Not all volumes were removed, so don't clean up the pod directory yet. It is likely
+			// that there are still mountpoints left which could stall RemoveAllOneFilesystem which
+			// would otherwise be called below.
+			// Errors for all removal operations have already been recorded, so don't add another
+			// one here.
 			continue
 		}
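Review note: the cleanup switches from refusing to touch leftover directories to removing them with rmdir(2), which by design fails on anything non-empty; a directory that is still a live mount point (or still holds data) is left alone, and only truly empty leftovers disappear. A small unix-only sketch of that property (temporary directories, no kubelet types):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

func main() {
	dir, err := os.MkdirTemp("", "volumes")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// A "volume" directory that still has content, standing in for a leaked
	// mount: rmdir refuses to remove it, so nothing is lost.
	full := filepath.Join(dir, "still-mounted")
	os.Mkdir(full, 0o755)
	os.WriteFile(filepath.Join(full, "data"), []byte("keep me"), 0o644)
	fmt.Println("non-empty:", syscall.Rmdir(full)) // ENOTEMPTY; directory survives

	// An empty leftover directory is removed cleanly.
	empty := filepath.Join(dir, "unmounted")
	os.Mkdir(empty, 0o755)
	fmt.Println("empty:", syscall.Rmdir(empty)) // <nil>
}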

--- next file ---

@@ -46,6 +46,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/apis/core:go_default_library",
+        "//pkg/features:go_default_library",
         "//pkg/quota/v1:go_default_library",
         "//pkg/quota/v1/generic:go_default_library",
         "//pkg/util/node:go_default_library",
@@ -54,6 +55,8 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
     ],
 )

--- next file ---

@@ -30,10 +30,12 @@ import (
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apiserver/pkg/admission"
+	"k8s.io/apiserver/pkg/util/feature"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
 	"k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
+	"k8s.io/kubernetes/pkg/features"
 	quota "k8s.io/kubernetes/pkg/quota/v1"
 	"k8s.io/kubernetes/pkg/quota/v1/generic"
 )
@@ -351,6 +353,10 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
 		limits = quota.Max(limits, pod.Spec.InitContainers[i].Resources.Limits)
 	}

+	if feature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+		requests = quota.Add(requests, pod.Spec.Overhead)
+		limits = quota.Add(limits, pod.Spec.Overhead)
+	}
+
 	result = quota.Add(result, podComputeUsageHelper(requests, limits))
 	return result, nil
 }
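Review note: with the PodOverhead feature gate enabled, the pod's RuntimeClass overhead is now charged against ResourceQuota on top of the aggregated container requests and limits. A worked example of the arithmetic in plain millicores (illustrative numbers; the real code operates on resource.Quantity values via quota.Add):

package main

import "fmt"

func main() {
	// Two containers requesting 100m and 250m CPU.
	containerRequests := []int64{100, 250}
	var requests int64
	for _, r := range containerRequests {
		requests += r
	}
	// Hypothetical pod.Spec.Overhead of 250m, e.g. for a sandboxed runtime.
	const overhead int64 = 250
	requests += overhead
	fmt.Printf("requests.cpu charged to quota: %dm\n", requests) // 600m
}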

--- next file ---

@@ -313,13 +313,8 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		volDataKey.volHandle:  csiSource.VolumeHandle,
 		volDataKey.driverName: csiSource.Driver,
 	}
-	if err = saveVolumeData(dataDir, volDataFileName, data); err != nil {
-		klog.Error(log("failed to save volume info data: %v", err))
-		if cleanErr := os.RemoveAll(dataDir); cleanErr != nil {
-			klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanErr))
-		}
-		return err
-	}
+
+	err = saveVolumeData(dataDir, volDataFileName, data)
 	defer func() {
 		// Only if there was an error and volume operation was considered
 		// finished, we should remove the directory.
@@ -332,6 +327,12 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		}
 	}()

+	if err != nil {
+		errMsg := log("failed to save volume info data: %v", err)
+		klog.Error(errMsg)
+		return errors.New(errMsg)
+	}
+
 	if !stageUnstageSet {
 		klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
 		// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.

--- next file ---

@ -42,6 +42,7 @@ import (
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager" "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
) )
const ( const (
@ -430,11 +431,23 @@ func (p *csiPlugin) NewMounter(
attachID := getAttachmentName(volumeHandle, driverName, node) attachID := getAttachmentName(volumeHandle, driverName, node)
volData[volDataKey.attachmentID] = attachID volData[volDataKey.attachmentID] = attachID
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil { err = saveVolumeData(dataDir, volDataFileName, volData)
if removeErr := os.RemoveAll(dataDir); removeErr != nil { defer func() {
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, removeErr)) // Only if there was an error and volume operation was considered
// finished, we should remove the directory.
if err != nil && volumetypes.IsOperationFinishedError(err) {
// attempt to cleanup volume mount dir.
if err = removeMountDir(p, dir); err != nil {
klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", dir, err))
} }
return nil, errors.New(log("failed to save volume info data: %v", err)) }
}()
if err != nil {
errorMsg := log("csi.NewMounter failed to save volume info data: %v", err)
klog.Error(errorMsg)
return nil, errors.New(errorMsg)
} }
klog.V(4).Info(log("mounter created successfully")) klog.V(4).Info(log("mounter created successfully"))
@@ -675,11 +688,21 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
 		volDataKey.attachmentID: attachID,
 	}
 
-	if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
-		if removeErr := os.RemoveAll(dataDir); removeErr != nil {
-			klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, removeErr))
-		}
-		return nil, errors.New(log("failed to save volume info data: %v", err))
+	err = saveVolumeData(dataDir, volDataFileName, volData)
+	defer func() {
+		// Only if there was an error and volume operation was considered
+		// finished, we should remove the directory.
+		if err != nil && volumetypes.IsOperationFinishedError(err) {
+			// attempt to cleanup volume mount dir.
+			if err = removeMountDir(p, dataDir); err != nil {
+				klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", dataDir, err))
+			}
+		}
+	}()
+
+	if err != nil {
+		errorMsg := log("csi.NewBlockVolumeMapper failed to save volume info data: %v", err)
+		klog.Error(errorMsg)
+		return nil, errors.New(errorMsg)
 	}
 
 	return mapper, nil
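Aside: both hunks above replace `if err := saveVolumeData(...); err != nil {` with a plain assignment to the function-scoped err before the defer is registered. The distinction matters: a deferred closure reads the named error variable as it stands when the function actually returns, so a single defer can guard every later error path, whereas := inside the if statement would declare a short-lived shadow the closure never sees. A hypothetical sketch (setup and step are made-up names, not Kubernetes APIs):

package main

import "fmt"

// step stands in for any fallible stage of volume setup.
func step(name string, fail bool) error {
	if fail {
		return fmt.Errorf("%s failed", name)
	}
	return nil
}

// setup registers one deferred cleanup that covers every subsequent
// error path: the closure reads the named return value err at the
// moment setup returns, so err must be assigned with =, not
// redeclared with := in a narrower scope.
func setup(failAt string) (err error) {
	defer func() {
		if err != nil {
			fmt.Println("cleanup triggered by:", err)
		}
	}()
	if err = step("save metadata", failAt == "save"); err != nil {
		return err
	}
	if err = step("mount", failAt == "mount"); err != nil {
		return err
	}
	return nil
}

func main() {
	_ = setup("mount") // prints: cleanup triggered by: mount failed
}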


@@ -75,7 +75,7 @@ func getUpperPath(path string) string {
 // Check whether a directory/file is a link type or not
 // LinkType could be SymbolicLink, Junction, or HardLink
 func isLinkPath(path string) (bool, error) {
-	cmd := fmt.Sprintf("(Get-Item -Path %s).LinkType", path)
+	cmd := fmt.Sprintf("(Get-Item -LiteralPath %q).LinkType", path)
 	output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
 	if err != nil {
 		return false, err
@@ -113,7 +113,7 @@ func evalSymlink(path string) (string, error) {
 		}
 	}
 	// This command will give the target path of a given symlink
-	cmd := fmt.Sprintf("(Get-Item -Path %s).Target", upperpath)
+	cmd := fmt.Sprintf("(Get-Item -LiteralPath %q).Target", upperpath)
 	output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
 	if err != nil {
 		return "", err

vendor/modules.txt

@@ -1145,7 +1145,7 @@ gopkg.in/square/go-jose.v2/jwt
 gopkg.in/warnings.v0
 # gopkg.in/yaml.v2 v2.2.8
 gopkg.in/yaml.v2
-# k8s.io/api v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1
+# k8s.io/api v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.17-k3s1
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
 k8s.io/api/admissionregistration/v1
@@ -1189,7 +1189,7 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1
+# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.17-k3s1
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@@ -1229,7 +1229,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
 k8s.io/apiextensions-apiserver/pkg/registry/customresource
 k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
 k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
-# k8s.io/apimachinery v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1
+# k8s.io/apimachinery v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.17-k3s1
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta
@@ -1291,7 +1291,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1
+# k8s.io/apiserver v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.17-k3s1
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
 k8s.io/apiserver/pkg/admission/initializer
@@ -1421,7 +1421,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
 k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1
+# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.17-k3s1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/kustomize
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@@ -1434,7 +1434,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.17-k3s1
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/cached
 k8s.io/client-go/discovery/cached/disk
@@ -1668,7 +1668,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1
+# k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.17-k3s1
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
 k8s.io/cloud-provider/node/helpers
@@ -1676,13 +1676,13 @@ k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.17-k3s1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1
+# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.17-k3s1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -1697,7 +1697,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1
+# k8s.io/component-base v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.17-k3s1
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
 k8s.io/component-base/codec
@@ -1715,10 +1715,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
 k8s.io/component-base/metrics/testutil
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/cri-api v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1
+# k8s.io/cri-api v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.17-k3s1
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.17-k3s1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@@ -1733,7 +1733,7 @@ k8s.io/gengo/types
 k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1
+# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.17-k3s1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -1761,7 +1761,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.17-k3s1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
 k8s.io/kube-openapi/pkg/aggregator
@@ -1772,14 +1772,14 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1
+# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.17-k3s1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1
+# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.17-k3s1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1alpha1
 k8s.io/kube-scheduler/config/v1alpha2
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1
+# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.17-k3s1
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
 k8s.io/kubectl/pkg/cmd/annotate
@@ -1854,11 +1854,11 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1
+# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.17-k3s1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
 k8s.io/kubelet/pkg/apis/pluginregistration/v1
-# k8s.io/kubernetes v1.18.16 => github.com/k3s-io/kubernetes v1.18.16-k3s1
+# k8s.io/kubernetes v1.18.17 => github.com/k3s-io/kubernetes v1.18.17-k3s1
 k8s.io/kubernetes/cmd/cloud-controller-manager/app
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@@ -2601,7 +2601,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
 k8s.io/kubernetes/third_party/forked/ipvs
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.17-k3s1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -2632,7 +2632,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1
+# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.17-k3s1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2