Update to v1.18.16 (#2950)

* Update to v1.18.16

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update to v1.18.16

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
Hussein Galal 2021-02-18 23:21:51 +02:00 committed by GitHub
parent b8f90a031a
commit 8c7dd1395f
32 changed files with 823 additions and 285 deletions

go.mod

@@ -33,31 +33,31 @@ replace (
github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.15-k3s1
+k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1
-k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.15-k3s1
+k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1
-k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.15-k3s1
+k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1
-k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.15-k3s1
+k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1
-k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.15-k3s1
+k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1
-k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.15-k3s1
+k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1
-k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.15-k3s1
+k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1
-k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.15-k3s1
+k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1
-k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.15-k3s1
+k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1
-k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.15-k3s1
+k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1
-k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.15-k3s1
+k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1
-k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.15-k3s1
+k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1
-k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.15-k3s1
+k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1
-k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.15-k3s1
+k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1
-k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.15-k3s1
+k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1
-k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.15-k3s1
+k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1
-k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.15-k3s1
+k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1
-k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.15-k3s1
+k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1
-k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.18.15-k3s1
+k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.18.16-k3s1
-k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.15-k3s1
+k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1
-k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.15-k3s1
+k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1
-k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.18.15-k3s1
+k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.18.16-k3s1
-k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.15-k3s1
+k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.16-k3s1
-k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.15-k3s1
+k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.16-k3s1
-k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.18.15-k3s1
+k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.18.16-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
@@ -124,5 +124,5 @@ require (
k8s.io/component-base v0.0.0
k8s.io/cri-api v0.0.0
k8s.io/klog v1.0.0
-k8s.io/kubernetes v1.18.15
+k8s.io/kubernetes v1.18.16
)

go.sum

@ -458,49 +458,49 @@ github.com/k3s-io/cri v1.3.0-k3s.9 h1:1PUJysV7YQElM5JEjnHdOQ2kgwtTiSJ5fpx1j4/Wjz
github.com/k3s-io/cri v1.3.0-k3s.9/go.mod h1:fGPUUHMKQik/vIegSe05DtX/m4miovdtvVLqRUFAkK0= github.com/k3s-io/cri v1.3.0-k3s.9/go.mod h1:fGPUUHMKQik/vIegSe05DtX/m4miovdtvVLqRUFAkK0=
github.com/k3s-io/helm-controller v0.8.3 h1:GWxavyMz7Bw2ClxH5okkeOL8o5U6IBK7uauc44SDCjU= github.com/k3s-io/helm-controller v0.8.3 h1:GWxavyMz7Bw2ClxH5okkeOL8o5U6IBK7uauc44SDCjU=
github.com/k3s-io/helm-controller v0.8.3/go.mod h1:nZP8FH3KZrNNUf5r+SwwiMR63HS6lxdHdpHijgPfF74= github.com/k3s-io/helm-controller v0.8.3/go.mod h1:nZP8FH3KZrNNUf5r+SwwiMR63HS6lxdHdpHijgPfF74=
github.com/k3s-io/kubernetes v1.18.15-k3s1 h1:hcQXepHGxTP7G3FyoHFbOjDKni1xq2vWIIoFhlBG83M= github.com/k3s-io/kubernetes v1.18.16-k3s1 h1:VOCBgjlSMQsZhU/Qn3Y0bny3y74tBjovp6m2VclqNbc=
github.com/k3s-io/kubernetes v1.18.15-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8= github.com/k3s-io/kubernetes v1.18.16-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.15-k3s1 h1:ERXYS6TQ2qlSyPhlxJbv6n/2/aZ0WEu0AIGufSy2afQ= github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1 h1:U+tqB0V02DqEl7zblwc0hLJKhmvubnkLGvEh/NVvGRc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.15-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w= github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.15-k3s1 h1:a3sOwx5FCPW0uwoqqWChUw/IJ9FVL8Zv89/WNaqqCIQ= github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1 h1:iG62cqadChdENW46VQutzG9Oeck5BO7lKsNbudIYwp4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.15-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk= github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.15-k3s1 h1:12DQjfDbSpRDcbNh6WXL6SiqT4cIR0O1bCAQVucz730= github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1 h1:hoIFgutUJrIjo6iOF0MOQXv6zJtD3srbg4Bdn/Gl2qM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.15-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o= github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.15-k3s1 h1:8Nbmny/CjMkCv/qxsRo2x024xDQMq+k3UFy5R7JeIxQ= github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1 h1:v0oEi/fzkt1mqpoPCsmPKFmW8jGS2zItcojQw6WmDq8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.15-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw= github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.15-k3s1 h1:a1OePV28qNA21Tv+FXNL7jsTt1YZhwx9miAeL4AGw78= github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1 h1:16OlDNNaI/ixfPy94CPtk7bZjICaR9Wl4Eqg4t73x8U=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.15-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE= github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.15-k3s1 h1:mVqozxuZ1SmQPTBNaEWxn7mRO5/WW0LJpDY0yhwBN/c= github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1 h1:bJc5E87t0lMfd6W+vMw7swO3D8ELIWGmkzNCiKTMJD8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.15-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos= github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.15-k3s1 h1:HDdj8TJWO0ehGltPqYInSz9j3ZVmTk3kv7INSat6n/k= github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1 h1:eiavvS0HQcpkepHwQE+x7TxjWzNYYHzxXupYFFBodus=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.15-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI= github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.15-k3s1 h1:dvdp5/pqf97qXZmEE5UCxrA4CZeMqP9OvgehNsgJ9Tg= github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1 h1:7fXjYEQAriIqwuQE39xgZnMjzIRmBC6hPm134mT8LsA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.15-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8= github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.15-k3s1 h1:K+RmscxNNgyCoOsUxGf3hFK5JWz8ZuPhh6o83B/rIEI= github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1 h1:fmow5tJk9U0l7g1Et5Pzz5vj09P6IEKa/fjAXyL5ZQY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.15-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw= github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.15-k3s1 h1:YP+cvLB8w0n4WbFrgu4mBMl2es2lf2phFC5N9MMs46U= github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1 h1:TNKQ5XZ11bKFxXomanfTA4DbWz7MqAUPhFF+8T/tqTw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.15-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q= github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.15-k3s1 h1:MvO8O98sFkRtRNe9QnrnPtB4ofkIuFqbZ+VpOWNzu8A= github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1 h1:z0SjsT0qnzA1u2HqNtFeeXZer6TDhiY0ov7caLogwng=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.15-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk= github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.15-k3s1 h1:ue0NXiNu2vf9M6t7lDs3B9EzEpi5TittToIa8oMpnrg= github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1 h1:9ytp165C2ywtg3pZsdy4fyI2nlWDfVkIlWsHj5Z+tNM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.15-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE= github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.15-k3s1 h1:q2e6GYEnrpN32T8KB2aFLgysnN4RAowEJCu44xmcK8Y= github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1 h1:3BJ4AzNapPb5XumVEBX53/HAmhnqsuv0GuCKE1sOxEU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.15-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8= github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.15-k3s1 h1:SiEKqX854ZFJMypiSBZ9ZbKNVhL55XckQd3YUjUrY3Q= github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1 h1:+0T2TH6YJGs/VSsXiPfU7ew1BJNSvrFRAw+H66jpMuU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.15-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY= github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.15-k3s1 h1:69g241Ox8rJJk3FXsOOL1aO+kiy2I5elZagM0+QZM5w= github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1 h1:kvIfXmOsTwglK9EW9nhgIThQJZ/K2o75v3XmSlc19cM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.15-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU= github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.15-k3s1 h1:Ujtu5t3T3Tnwr62aZYdpLxg/VYyt5aViSbgW/Aj6rzo= github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1 h1:fpXQAwgsxQtVzEYXcJJ+I0y2XfmPZTND5+R9jOlL0Ag=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.15-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI= github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.15-k3s1 h1:xtxBmtb9TyIYjGHi882xXpCvM/tQw27uqmtwQXeGQK8= github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1 h1:ZNU6UhplF3o9bQgFW/kAxgzIYuQyJc5sy8gd7WfuCx0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.15-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA= github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.15-k3s1 h1:q4gZT6sv7X2mj0tgkIXK5TzItWh4zSGhchJr+NbfeVI= github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1 h1:mEcew1xAHJjG7wGlXE88R5kz9NcIPNZI361WhPDHDIY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.15-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew= github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.15-k3s1 h1:48r/lsJ3vwhI5uW/PNcFgLSSBKGsVjUNBLUv14Y0dFk= github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1 h1:9siB+L5MHHZs/IbN9NV43WYkvr9kLMjeFIEErfigf+w=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.15-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA= github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.15-k3s1 h1:SEL4F76VARKNIBUmP34CIt6ZFHrr2wtBKVlSV730EtU= github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1 h1:7RC1767UxORogidkHORJTBi8pOjTWx6kD6wAi14oaj0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.15-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y= github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.15-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo= github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.16-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
github.com/karrick/godirwalk v1.7.5 h1:VbzFqwXwNbAZoA6W5odrLr+hKK197CcENcPh6E/gJ0M= github.com/karrick/godirwalk v1.7.5 h1:VbzFqwXwNbAZoA6W5odrLr+hKK197CcENcPh6E/gJ0M=
github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=

View File

@@ -19,6 +19,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
@ -73,6 +74,7 @@ go_test(
"//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library", "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",

View File

@@ -34,6 +34,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
apitypes "k8s.io/apimachinery/pkg/types"
apierrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
fcboot "k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap" fcboot "k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap"
"k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authentication/user"
@ -221,14 +222,28 @@ func (cfgCtl *configController) initializeConfigController(informerFactory kubei
}}) }})
} }
// used from the unit tests only.
func (cfgCtl *configController) getPriorityLevelState(plName string) *priorityLevelState {
cfgCtl.lock.Lock()
defer cfgCtl.lock.Unlock()
return cfgCtl.priorityLevelStates[plName]
}
func (cfgCtl *configController) Run(stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
// Let the config worker stop when we are done
defer cfgCtl.configQueue.ShutDown()
klog.Info("Starting API Priority and Fairness config controller")
if ok := cache.WaitForCacheSync(stopCh, cfgCtl.plInformerSynced, cfgCtl.fsInformerSynced); !ok {
return fmt.Errorf("Never achieved initial sync")
}
klog.Info("Running API Priority and Fairness config worker")
-wait.Until(cfgCtl.runWorker, time.Second, stopCh)
+go wait.Until(cfgCtl.runWorker, time.Second, stopCh)
<-stopCh
klog.Info("Shutting down API Priority and Fairness config worker")
return nil
}
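The Run change above swaps a blocking wait.Until call for the usual controller pattern: start the worker in its own goroutine, then block on the stop channel so the deferred cleanup (crash handling, queue shutdown) runs only at shutdown. A minimal, generic sketch of that pattern, not code from this commit; the worker body and timings are placeholders:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// run mirrors the pattern in the diff: launch the worker with wait.Until in a
// goroutine, then block until stopCh is closed so deferred cleanup happens once.
func run(stopCh <-chan struct{}) error {
	defer fmt.Println("worker stopped, cleanup done")

	go wait.Until(func() { fmt.Println("processing queue") }, time.Second, stopCh)

	<-stopCh // block here instead of inside wait.Until
	return nil
}

func main() {
	stopCh := make(chan struct{})
	go func() { time.Sleep(3 * time.Second); close(stopCh) }()
	if err := run(stopCh); err != nil {
		fmt.Println("error:", err)
	}
}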

View File

@@ -98,21 +98,28 @@ func (cfgCtl *configController) Handle(ctx context.Context, requestDigest Reques
}
klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued)
var executed bool
-idle := req.Finish(func() {
+idle, panicking := true, true
defer func() {
klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v, Finish() => panicking=%v idle=%v",
requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued, panicking, idle)
if idle {
cfgCtl.maybeReap(pl.Name)
}
}()
idle = req.Finish(func() {
if queued {
metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime))
}
metrics.AddDispatch(pl.Name, fs.Name)
executed = true
startExecutionTime := time.Now()
-execFn()
+defer func() {
metrics.ObserveExecutionDuration(pl.Name, fs.Name, time.Since(startExecutionTime))
}()
execFn()
})
if queued && !executed {
metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime))
}
-klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v, Finish() => idle=%v", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued, idle)
+panicking = false
if idle {
cfgCtl.maybeReap(pl.Name)
}
}

View File

@@ -299,8 +299,15 @@ func (req *request) Finish(execFn func()) bool {
if !exec {
return idle
}
func() {
defer func() {
idle = req.qs.finishRequestAndDispatchAsMuchAsPossible(req)
}()
execFn()
-return req.qs.finishRequestAndDispatchAsMuchAsPossible(req)
+}()
return idle
}
func (req *request) wait() (bool, bool) {
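The Finish change moves the queueset bookkeeping into a deferred closure so it still runs, and its result is still captured, when execFn panics. A self-contained sketch of that defer-capture pattern, with stand-in names rather than the real queueset types:

package main

import "fmt"

// finish mimics request.Finish: the bookkeeping runs in a defer, so it happens
// and is assigned to the named return even if the callback panics.
func finish(callback func()) (idle bool) {
	func() {
		defer func() {
			idle = true // stand-in for finishRequestAndDispatchAsMuchAsPossible
		}()
		callback()
	}()
	return idle
}

// handle mimics the caller: idle/panicking start pessimistic so the deferred
// cleanup still sees sane values when the callback panics.
func handle(callback func()) {
	idle, panicking := true, true
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
		fmt.Println("panicking:", panicking, "idle:", idle)
	}()
	idle = finish(callback)
	panicking = false
}

func main() {
	handle(func() { fmt.Println("normal execution") })
	handle(func() { panic("boom") })
}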

View File

@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "18"
-gitVersion = "v1.18.15-k3s1"
+gitVersion = "v1.18.16-k3s1"
-gitCommit = "92952488446669be6ff06c46c3e13de3fe395e75"
+gitCommit = "97eee15c8b5c26daccd339bb0ed95a777e1c85a6"
gitTreeState = "clean"
-buildDate = "2021-01-13T19:13:58Z"
+buildDate = "2021-02-18T20:13:50Z"
)

View File

@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "18"
-gitVersion = "v1.18.15-k3s1"
+gitVersion = "v1.18.16-k3s1"
-gitCommit = "92952488446669be6ff06c46c3e13de3fe395e75"
+gitCommit = "97eee15c8b5c26daccd339bb0ed95a777e1c85a6"
gitTreeState = "clean"
-buildDate = "2021-01-13T19:13:58Z"
+buildDate = "2021-02-18T20:13:50Z"
)

View File

@@ -45,6 +45,7 @@ go_test(
"azure_file_test.go",
"gce_pd_test.go",
"in_tree_volume_test.go",
"openstack_cinder_test.go",
],
embed = [":go_default_library"],
deps = [

View File

@@ -98,7 +98,7 @@ func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeInlineVolumeToCSI(vol
ObjectMeta: metav1.ObjectMeta{
// Must be unique per disk as it is used as the unique part of the
// staging path
-Name: fmt.Sprintf("%s-%s", AWSEBSDriverName, ebsSource.VolumeID),
+Name: fmt.Sprintf("%s-%s", AWSEBSDriverName, volumeHandle),
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{

View File

@@ -37,11 +37,11 @@ const (
volumeIDTemplate = "%s#%s#%s#%s"
// Parameter names defined in azure file CSI driver, refer to
// https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
-azureFileShareName = "shareName"
+shareNameField = "sharename"
secretNameField = "secretname"
secretNamespaceField = "secretnamespace"
secretNameTemplate = "azure-storage-account-%s-secret"
defaultSecretNamespace = "default"
resourceGroupAnnotation = "kubernetes.io/azure-file-resource-group"
)
@@ -90,7 +90,7 @@ func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol
Driver: AzureFileDriverName,
VolumeHandle: fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, ""),
ReadOnly: azureSource.ReadOnly,
-VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
+VolumeAttributes: map[string]string{shareNameField: azureSource.ShareName},
NodeStageSecretRef: &v1.SecretReference{
Name: azureSource.SecretName,
Namespace: defaultSecretNamespace,
@@ -135,7 +135,7 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
Namespace: defaultSecretNamespace,
},
ReadOnly: azureSource.ReadOnly,
-VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
+VolumeAttributes: map[string]string{shareNameField: azureSource.ShareName},
VolumeHandle: volumeID,
}
)
@@ -163,31 +163,48 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
ReadOnly: csiSource.ReadOnly,
}
for k, v := range csiSource.VolumeAttributes {
switch strings.ToLower(k) {
case shareNameField:
azureSource.ShareName = v
case secretNameField:
azureSource.SecretName = v
case secretNamespaceField:
ns := v
azureSource.SecretNamespace = &ns
}
}
resourceGroup := ""
if csiSource.NodeStageSecretRef != nil && csiSource.NodeStageSecretRef.Name != "" {
azureSource.SecretName = csiSource.NodeStageSecretRef.Name
azureSource.SecretNamespace = &csiSource.NodeStageSecretRef.Namespace
-if csiSource.VolumeAttributes != nil {
-if shareName, ok := csiSource.VolumeAttributes[azureFileShareName]; ok {
-azureSource.ShareName = shareName
-}
-}
}
-} else {
+if azureSource.ShareName == "" || azureSource.SecretName == "" {
rg, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
if err != nil {
return nil, err
}
if azureSource.ShareName == "" {
azureSource.ShareName = fileShareName
}
if azureSource.SecretName == "" {
azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
}
resourceGroup = rg
}
if azureSource.SecretNamespace == nil {
ns := defaultSecretNamespace
azureSource.SecretNamespace = &ns
}
pv.Spec.CSI = nil
pv.Spec.AzureFile = azureSource
-if resourceGroup != "" {
if pv.ObjectMeta.Annotations == nil {
pv.ObjectMeta.Annotations = map[string]string{}
}
+if resourceGroup != "" {
pv.ObjectMeta.Annotations[resourceGroupAnnotation] = resourceGroup
}

View File

@@ -18,6 +18,7 @@ package plugins
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
@@ -45,6 +46,30 @@ func NewOpenStackCinderCSITranslator() InTreePlugin {
// TranslateInTreeStorageClassParametersToCSI translates InTree Cinder storage class parameters to CSI storage class
func (t *osCinderCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error) {
var (
params = map[string]string{}
)
for k, v := range sc.Parameters {
switch strings.ToLower(k) {
case fsTypeKey:
params[csiFsTypeKey] = v
default:
// All other parameters are supported by the CSI driver.
// This includes also "availability", therefore do not translate it to sc.AllowedTopologies
params[k] = v
}
}
if len(sc.AllowedTopologies) > 0 {
newTopologies, err := translateAllowedTopologies(sc.AllowedTopologies, CinderTopologyKey)
if err != nil {
return nil, fmt.Errorf("failed translating allowed topologies: %v", err)
}
sc.AllowedTopologies = newTopologies
}
sc.Parameters = params
return sc, nil
}
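For orientation, a hedged usage sketch of the storage-class translation added above. It relies only on the exported constructor and method shown in the diff; the concrete in-tree key "fsType" (matched case-insensitively) and the pass-through of "availability" follow the switch in the new code, while the exact CSI-side key name is not shown here and is left to the library:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/csi-translation-lib/plugins"
)

func main() {
	sc := &storagev1.StorageClass{
		Parameters: map[string]string{
			"fsType":       "ext4", // rewritten to the CSI fstype parameter key
			"availability": "nova", // passed through unchanged, not turned into AllowedTopologies
		},
	}
	translated, err := plugins.NewOpenStackCinderCSITranslator().TranslateInTreeStorageClassToCSI(sc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", translated.Parameters)
}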

View File

@@ -41,7 +41,10 @@ go_library(
go_test(
name = "go_default_test",
-srcs = ["available_controller_test.go"],
+srcs = [
"available_controller_test.go",
"metrics_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -50,6 +53,7 @@ go_test(
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/component-base/metrics/testutil:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1:go_default_library",

View File

@@ -42,6 +42,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/transport"
"k8s.io/client-go/util/workqueue"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog" "k8s.io/klog"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper" apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper"
@ -51,6 +52,9 @@ import (
"k8s.io/kube-aggregator/pkg/controllers" "k8s.io/kube-aggregator/pkg/controllers"
) )
// making sure we only register metrics once into legacy registry
var registerIntoLegacyRegistryOnce sync.Once
// ServiceResolver knows how to convert a service reference into an actual location.
type ServiceResolver interface {
ResolveEndpoint(namespace, name string, port int32) (*url.URL, error)
@@ -81,6 +85,9 @@ type AvailableConditionController struct {
cache map[string]map[string][]string
// this lock protects operations on the above cache
cacheLock sync.RWMutex
// metrics registered into legacy registry
metrics *availabilityMetrics
}
// NewAvailableConditionController returns a new AvailableConditionController.
@@ -110,6 +117,7 @@ func NewAvailableConditionController(
// the maximum disruption time to a minimum, but it does prevent hot loops.
workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second),
"AvailableConditionController"),
metrics: newAvailabilityMetrics(),
}
// if a particular transport was specified, use that otherwise build one
@@ -171,12 +179,21 @@ func NewAvailableConditionController(
c.syncFn = c.sync
// TODO: decouple from legacyregistry
registerIntoLegacyRegistryOnce.Do(func() {
err = c.metrics.Register(legacyregistry.Register, legacyregistry.CustomRegister)
})
if err != nil {
return nil, err
}
return c, nil
}
func (c *AvailableConditionController) sync(key string) error {
originalAPIService, err := c.apiServiceLister.Get(key)
if apierrors.IsNotFound(err) {
c.metrics.ForgetAPIService(key)
return nil
}
if err != nil {
@@ -194,7 +211,7 @@ func (c *AvailableConditionController) sync(key string) error {
// local API services are always considered available
if apiService.Spec.Service == nil {
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, apiregistrationv1apihelper.NewLocalAvailableAPIServiceCondition())
-_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
+_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
@@ -204,14 +221,14 @@ func (c *AvailableConditionController) sync(key string) error {
availableCondition.Reason = "ServiceNotFound"
availableCondition.Message = fmt.Sprintf("service/%s in %q is not present", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
-_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
+_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
} else if err != nil {
availableCondition.Status = apiregistrationv1.ConditionUnknown
availableCondition.Reason = "ServiceAccessError"
availableCondition.Message = fmt.Sprintf("service/%s in %q cannot be checked due to: %v", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, err)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
-_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
+_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
@@ -232,7 +249,7 @@ func (c *AvailableConditionController) sync(key string) error {
availableCondition.Reason = "ServicePortError"
availableCondition.Message = fmt.Sprintf("service/%s in %q is not listening on port %d", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, *apiService.Spec.Service.Port)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
-_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
+_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
@@ -242,14 +259,14 @@ func (c *AvailableConditionController) sync(key string) error {
availableCondition.Reason = "EndpointsNotFound"
availableCondition.Message = fmt.Sprintf("cannot find endpoints for service/%s in %q", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
-_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
+_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
} else if err != nil {
availableCondition.Status = apiregistrationv1.ConditionUnknown
availableCondition.Reason = "EndpointsAccessError"
availableCondition.Message = fmt.Sprintf("service/%s in %q cannot be checked due to: %v", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, err)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
-_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
+_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
hasActiveEndpoints := false
@@ -270,7 +287,7 @@ func (c *AvailableConditionController) sync(key string) error {
availableCondition.Reason = "MissingEndpoints"
availableCondition.Message = fmt.Sprintf("endpoints for service/%s in %q have no addresses with port name %q", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, portName)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
-_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
+_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
}
@@ -343,7 +360,7 @@ func (c *AvailableConditionController) sync(key string) error {
availableCondition.Reason = "FailedDiscoveryCheck"
availableCondition.Message = lastError.Error()
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
-_, updateErr := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
+_, updateErr := c.updateAPIServiceStatus(originalAPIService, apiService)
if updateErr != nil {
return updateErr
}
@@ -356,26 +373,26 @@ func (c *AvailableConditionController) sync(key string) error {
availableCondition.Reason = "Passed"
availableCondition.Message = "all checks passed"
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
-_, err = updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
+_, err = c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
// updateAPIServiceStatus only issues an update if a change is detected. We have a tight resync loop to quickly detect dead
// apiservices. Doing that means we don't want to quickly issue no-op updates.
-func updateAPIServiceStatus(client apiregistrationclient.APIServicesGetter, originalAPIService, newAPIService *apiregistrationv1.APIService) (*apiregistrationv1.APIService, error) {
+func (c *AvailableConditionController) updateAPIServiceStatus(originalAPIService, newAPIService *apiregistrationv1.APIService) (*apiregistrationv1.APIService, error) {
// update this metric on every sync operation to reflect the actual state
-setUnavailableGauge(newAPIService)
+c.setUnavailableGauge(newAPIService)
if equality.Semantic.DeepEqual(originalAPIService.Status, newAPIService.Status) {
return newAPIService, nil
}
-newAPIService, err := client.APIServices().UpdateStatus(context.TODO(), newAPIService, metav1.UpdateOptions{})
+newAPIService, err := c.apiServiceClient.APIServices().UpdateStatus(context.TODO(), newAPIService, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
-setUnavailableCounter(originalAPIService, newAPIService)
+c.setUnavailableCounter(originalAPIService, newAPIService)
return newAPIService, nil
}
@@ -560,17 +577,17 @@ func (c *AvailableConditionController) deleteEndpoints(obj interface{}) {
}
// setUnavailableGauge set the metrics so that it reflect the current state base on availability of the given service
-func setUnavailableGauge(newAPIService *apiregistrationv1.APIService) {
+func (c *AvailableConditionController) setUnavailableGauge(newAPIService *apiregistrationv1.APIService) {
if apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available) {
-unavailableGauge.WithLabelValues(newAPIService.Name).Set(0.0)
+c.metrics.SetAPIServiceAvailable(newAPIService.Name)
return
}
-unavailableGauge.WithLabelValues(newAPIService.Name).Set(1.0)
+c.metrics.SetAPIServiceUnavailable(newAPIService.Name)
}
// setUnavailableCounter increases the metrics only if the given service is unavailable and its APIServiceCondition has changed
-func setUnavailableCounter(originalAPIService, newAPIService *apiregistrationv1.APIService) {
+func (c *AvailableConditionController) setUnavailableCounter(originalAPIService, newAPIService *apiregistrationv1.APIService) {
wasAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(originalAPIService, apiregistrationv1.Available)
isAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available)
statusChanged := isAvailable != wasAvailable
@@ -580,6 +597,6 @@ func setUnavailableCounter(originalAPIService, newAPIService *apiregistrationv1.
if newCondition := apiregistrationv1apihelper.GetAPIServiceConditionByType(newAPIService, apiregistrationv1.Available); newCondition != nil {
reason = newCondition.Reason
}
-unavailableCounter.WithLabelValues(newAPIService.Name, reason).Inc()
+c.metrics.UnavailableCounter(newAPIService.Name, reason).Inc()
}
}

View File

@ -17,8 +17,9 @@ limitations under the License.
package apiserver package apiserver
import ( import (
"sync"
"k8s.io/component-base/metrics" "k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
) )
/* /*
@ -30,25 +31,120 @@ import (
* the metric stability policy. * the metric stability policy.
*/ */
var ( var (
unavailableCounter = metrics.NewCounterVec( unavailableGaugeDesc = metrics.NewDesc(
"aggregator_unavailable_apiservice",
"Gauge of APIServices which are marked as unavailable broken down by APIService name.",
[]string{"name"},
nil,
metrics.ALPHA,
"",
)
)
type availabilityMetrics struct {
unavailableCounter *metrics.CounterVec
*availabilityCollector
}
func newAvailabilityMetrics() *availabilityMetrics {
return &availabilityMetrics{
unavailableCounter: metrics.NewCounterVec(
&metrics.CounterOpts{ &metrics.CounterOpts{
Name: "aggregator_unavailable_apiservice_count", Name: "aggregator_unavailable_apiservice_total",
Help: "Counter of APIServices which are marked as unavailable broken down by APIService name and reason.", Help: "Counter of APIServices which are marked as unavailable broken down by APIService name and reason.",
StabilityLevel: metrics.ALPHA, StabilityLevel: metrics.ALPHA,
}, },
[]string{"name", "reason"}, []string{"name", "reason"},
) ),
unavailableGauge = metrics.NewGaugeVec( availabilityCollector: newAvailabilityCollector(),
&metrics.GaugeOpts{ }
Name: "aggregator_unavailable_apiservice", }
Help: "Gauge of APIServices which are marked as unavailable broken down by APIService name.",
StabilityLevel: metrics.ALPHA, // Register registers apiservice availability metrics.
}, func (m *availabilityMetrics) Register(
[]string{"name"}, registrationFunc func(metrics.Registerable) error,
) customRegistrationFunc func(metrics.StableCollector) error,
) ) error {
err := registrationFunc(m.unavailableCounter)
func init() { if err != nil {
legacyregistry.MustRegister(unavailableCounter) return err
legacyregistry.MustRegister(unavailableGauge) }
err = customRegistrationFunc(m.availabilityCollector)
if err != nil {
return err
}
return nil
}
// UnavailableCounter returns a counter to track apiservices marked as unavailable.
func (m *availabilityMetrics) UnavailableCounter(apiServiceName, reason string) metrics.CounterMetric {
return m.unavailableCounter.WithLabelValues(apiServiceName, reason)
}
type availabilityCollector struct {
metrics.BaseStableCollector
mtx sync.RWMutex
availabilities map[string]bool
}
// Check if apiServiceStatusCollector implements necessary interface.
var _ metrics.StableCollector = &availabilityCollector{}
func newAvailabilityCollector() *availabilityCollector {
return &availabilityCollector{
availabilities: make(map[string]bool),
}
}
// DescribeWithStability implements the metrics.StableCollector interface.
func (c *availabilityCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
ch <- unavailableGaugeDesc
}
// CollectWithStability implements the metrics.StableCollector interface.
func (c *availabilityCollector) CollectWithStability(ch chan<- metrics.Metric) {
c.mtx.RLock()
defer c.mtx.RUnlock()
for apiServiceName, isAvailable := range c.availabilities {
gaugeValue := 1.0
if isAvailable {
gaugeValue = 0.0
}
ch <- metrics.NewLazyConstMetric(
unavailableGaugeDesc,
metrics.GaugeValue,
gaugeValue,
apiServiceName,
)
}
}
// SetAPIServiceAvailable sets the given apiservice availability gauge to available.
func (c *availabilityCollector) SetAPIServiceAvailable(apiServiceKey string) {
c.setAPIServiceAvailability(apiServiceKey, true)
}
// SetAPIServiceUnavailable sets the given apiservice availability gauge to unavailable.
func (c *availabilityCollector) SetAPIServiceUnavailable(apiServiceKey string) {
c.setAPIServiceAvailability(apiServiceKey, false)
}
func (c *availabilityCollector) setAPIServiceAvailability(apiServiceKey string, availability bool) {
c.mtx.Lock()
defer c.mtx.Unlock()
c.availabilities[apiServiceKey] = availability
}
// ForgetAPIService removes the availability gauge of the given apiservice.
func (c *availabilityCollector) ForgetAPIService(apiServiceKey string) {
c.mtx.Lock()
defer c.mtx.Unlock()
delete(c.availabilities, apiServiceKey)
} }

View File

@@ -471,12 +471,12 @@ func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionB
// IMPORTANT NOTE : the returned pods should NOT be modified.
func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {
sel, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
-if sel.Empty() {
-return []*v1.Pod{}, nil
-}
if err != nil {
return []*v1.Pod{}, err
}
+if sel.Empty() {
+return []*v1.Pod{}, nil
+}
pods, err := dc.podLister.Pods(pdb.Namespace).List(sel)
if err != nil {
return []*v1.Pod{}, err
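The reordering above matters because metav1.LabelSelectorAsSelector can return a nil selector together with an error, so calling sel.Empty() before the error check risks a nil dereference. A tiny generic sketch of the safe ordering; parseSelector is a hypothetical stand-in for the real call:

package main

import (
	"errors"
	"fmt"
)

type selector struct{ terms []string }

func (s *selector) Empty() bool { return len(s.terms) == 0 }

// parseSelector stands in for metav1.LabelSelectorAsSelector: on error it
// returns a nil selector, so the error must be checked before any method call.
func parseSelector(ok bool) (*selector, error) {
	if !ok {
		return nil, errors.New("invalid selector")
	}
	return &selector{}, nil
}

func main() {
	sel, err := parseSelector(false)
	if err != nil { // check the error first...
		fmt.Println("error:", err)
		return
	}
	if sel.Empty() { // ...only then is it safe to use the selector
		fmt.Println("empty selector")
	}
}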

View File

@@ -411,7 +411,11 @@ func (adc *attachDetachController) populateActualStateOfWorld() error {
adc.addNodeToDswp(node, types.NodeName(node.Name))
}
}
-return nil
+err = adc.processVolumeAttachments()
if err != nil {
klog.Errorf("Failed to process volume attachments: %v", err)
}
return err
}
func (adc *attachDetachController) getNodeVolumeDevicePath(
@@ -481,7 +485,12 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
err)
continue
}
-if adc.actualStateOfWorld.IsVolumeAttachedToNode(volumeName, nodeName) {
+attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
if attachState == cache.AttachStateAttached {
klog.V(10).Infof("Volume %q is attached to node %q. Marking as attached in ActualStateOfWorld",
volumeName,
nodeName,
)
devicePath, err := adc.getNodeVolumeDevicePath(volumeName, nodeName)
if err != nil {
klog.Errorf("Failed to find device path: %v", err)
@@ -695,6 +704,67 @@ func (adc *attachDetachController) processVolumesInUse(
}
}
// Process Volume-Attachment objects.
// Should be called only after populating attached volumes in the ASW.
// For each VA object, this function checks if its present in the ASW.
// If not, adds the volume to ASW as an "uncertain" attachment.
// In the reconciler, the logic checks if the volume is present in the DSW;
// if yes, the reconciler will attempt attach on the volume;
// if not (could be a dangling attachment), the reconciler will detach this volume.
func (adc *attachDetachController) processVolumeAttachments() error {
vas, err := adc.volumeAttachmentLister.List(labels.Everything())
if err != nil {
klog.Errorf("failed to list VolumeAttachment objects: %v", err)
return err
}
for _, va := range vas {
nodeName := types.NodeName(va.Spec.NodeName)
pvName := va.Spec.Source.PersistentVolumeName
if pvName == nil {
// Currently VA objects are created for CSI volumes only. nil pvName is unexpected, generate a warning
klog.Warningf("Skipping the va as its pvName is nil, va.Name: %q, nodeName: %q",
va.Name, nodeName)
continue
}
pv, err := adc.pvLister.Get(*pvName)
if err != nil {
klog.Errorf("Unable to lookup pv object for: %q, err: %v", *pvName, err)
continue
}
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
plugin, err := adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || plugin == nil {
// Currently VA objects are created for CSI volumes only. nil plugin is unexpected, generate a warning
klog.Warningf(
"Skipping processing the volume %q on nodeName: %q, no attacher interface found. err=%v",
*pvName,
nodeName,
err)
continue
}
volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
klog.Errorf(
"Failed to find unique name for volume:%q, va.Name:%q, nodeName:%q: %v",
*pvName,
va.Name,
nodeName,
err)
continue
}
attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
if attachState == cache.AttachStateDetached {
klog.V(1).Infof("Marking volume attachment as uncertain as volume:%q (%q) is not attached (%v)",
volumeName, nodeName, attachState)
err = adc.actualStateOfWorld.MarkVolumeAsUncertain(volumeName, volumeSpec, nodeName)
if err != nil {
klog.Errorf("MarkVolumeAsUncertain fail to add the volume %q (%q) to ASW. err: %s", volumeName, nodeName, err)
}
}
}
return nil
}
var _ volume.VolumeHost = &attachDetachController{} var _ volume.VolumeHost = &attachDetachController{}
var _ volume.AttachDetachVolumeHost = &attachDetachController{} var _ volume.AttachDetachVolumeHost = &attachDetachController{}
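Note: processVolumeAttachments runs once the attached volumes have been populated into the ActualStateOfWorld (ASW); any VolumeAttachment object it cannot find there is recorded as an "uncertain" attachment, which the reconciler later either re-attaches (if still desired) or detaches (if dangling). A simplified sketch of that populate-as-uncertain step, using plain maps and strings instead of the controller's cache types:

package main

import "fmt"

// pair identifies a volume/node combination in this simplified actual state of world.
type pair struct{ volume, node string }

// markUncertain records any listed attachment that the store does not know about
// as "uncertain", so a reconciler can later re-attach it (if still desired) or
// detach it (if it turned out to be dangling).
func markUncertain(known map[pair]string, listed []pair) {
	for _, p := range listed {
		if _, ok := known[p]; !ok {
			known[p] = "uncertain"
		}
	}
}

func main() {
	asw := map[pair]string{{"vol-1", "node-a"}: "attached"}
	markUncertain(asw, []pair{{"vol-1", "node-a"}, {"vol-2", "node-b"}})
	fmt.Println(asw[pair{"vol-2", "node-b"}]) // uncertain
}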


@ -96,10 +96,13 @@ type ActualStateOfWorld interface {
// nodes, the volume is also deleted. // nodes, the volume is also deleted.
DeleteVolumeNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName) DeleteVolumeNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// IsVolumeAttachedToNode returns true if the specified volume/node combo exists // GetAttachState returns the attach state for the given volume-node
// in the underlying store indicating the specified volume is attached to // combination.
// the specified node. // Returns AttachStateAttached if the specified volume/node combo exists in
IsVolumeAttachedToNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool // the underlying store indicating the specified volume is attached to the
// specified node, AttachStateDetached if the combo does not exist, or
// AttachStateUncertain if the attached state is marked as uncertain.
GetAttachState(volumeName v1.UniqueVolumeName, nodeName types.NodeName) AttachState
// GetAttachedVolumes generates and returns a list of volumes/node pairs // GetAttachedVolumes generates and returns a list of volumes/node pairs
// reflecting which volumes might attached to which nodes based on the // reflecting which volumes might attached to which nodes based on the
@ -153,6 +156,31 @@ type AttachedVolume struct {
DetachRequestedTime time.Time DetachRequestedTime time.Time
} }
// AttachState represents the attach state of a volume to a node known to the
// Actual State of World.
// This type is used as external representation of attach state (specifically
// as the return type of GetAttachState only); the state is represented
// differently in the internal cache implementation.
type AttachState int
const (
// AttachStateAttached represents the state in which the volume is attached to
// the node.
AttachStateAttached AttachState = iota
// AttachStateUncertain represents the state in which the Actual State of World
// does not know whether the volume is attached to the node.
AttachStateUncertain
// AttachStateDetached represents the state in which the volume is not
// attached to the node.
AttachStateDetached
)
func (s AttachState) String() string {
return []string{"Attached", "Uncertain", "Detached"}[s]
}
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld. // NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld { func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{ return &actualStateOfWorld{
@ -530,19 +558,22 @@ func (asw *actualStateOfWorld) DeleteVolumeNode(
asw.removeVolumeFromReportAsAttached(volumeName, nodeName) asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
} }
func (asw *actualStateOfWorld) IsVolumeAttachedToNode( func (asw *actualStateOfWorld) GetAttachState(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool { volumeName v1.UniqueVolumeName, nodeName types.NodeName) AttachState {
asw.RLock() asw.RLock()
defer asw.RUnlock() defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName] volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if volumeExists { if volumeExists {
if node, nodeExists := volumeObj.nodesAttachedTo[nodeName]; nodeExists { if node, nodeExists := volumeObj.nodesAttachedTo[nodeName]; nodeExists {
return node.attachedConfirmed if node.attachedConfirmed {
return AttachStateAttached
}
return AttachStateUncertain
} }
} }
return false return AttachStateDetached
} }
func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume { func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
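Note: callers that previously branched on the boolean IsVolumeAttachedToNode now handle three outcomes from GetAttachState. A small, self-contained illustration of such a caller (hypothetical names, not the reconciler itself):

package main

import "fmt"

type attachState int

const (
	stateAttached attachState = iota
	stateUncertain
	stateDetached
)

// describe shows how a caller that used to branch on a bool now handles the
// third, "uncertain" case explicitly.
func describe(s attachState) string {
	switch s {
	case stateAttached:
		return "attached: refresh bookkeeping only"
	case stateUncertain:
		return "uncertain: issue an attach to confirm"
	default:
		return "detached: nothing to detach"
	}
}

func main() {
	for _, s := range []attachState{stateAttached, stateUncertain, stateDetached} {
		fmt.Println(int(s), describe(s))
	}
}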


@ -142,6 +142,7 @@ func (rc *reconciler) reconcile() {
for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() { for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
if !rc.desiredStateOfWorld.VolumeExists( if !rc.desiredStateOfWorld.VolumeExists(
attachedVolume.VolumeName, attachedVolume.NodeName) { attachedVolume.VolumeName, attachedVolume.NodeName) {
// Check whether there already exist an operation pending, and don't even // Check whether there already exist an operation pending, and don't even
// try to start an operation if there is already one running. // try to start an operation if there is already one running.
// This check must be done before we do any other checks, as otherwise the other checks // This check must be done before we do any other checks, as otherwise the other checks
@ -161,6 +162,21 @@ func (rc *reconciler) reconcile() {
} }
} }
// Because the detach operation updates the ActualStateOfWorld before
// marking itself complete, it's possible for the volume to be removed
// from the ActualStateOfWorld between the GetAttachedVolumes() check
// and the IsOperationPending() check above.
// Check the ActualStateOfWorld again to avoid issuing an unnecessary
// detach.
// See https://github.com/kubernetes/kubernetes/issues/93902
attachState := rc.actualStateOfWorld.GetAttachState(attachedVolume.VolumeName, attachedVolume.NodeName)
if attachState == cache.AttachStateDetached {
if klog.V(5) {
klog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached--skipping", ""))
}
continue
}
// Set the detach request time // Set the detach request time
elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName) elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil { if err != nil {
@ -226,7 +242,31 @@ func (rc *reconciler) reconcile() {
func (rc *reconciler) attachDesiredVolumes() { func (rc *reconciler) attachDesiredVolumes() {
// Ensure volumes that should be attached are attached. // Ensure volumes that should be attached are attached.
for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() { for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
if rc.actualStateOfWorld.IsVolumeAttachedToNode(volumeToAttach.VolumeName, volumeToAttach.NodeName) { if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
// Don't even try to start an operation if there is already one running for the given volume and node.
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
if klog.V(10) {
klog.Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}
} else {
// Don't even try to start an operation if there is already one running for the given volume
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
if klog.V(10) {
klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}
}
// Because the attach operation updates the ActualStateOfWorld before
// marking itself complete, IsOperationPending() must be checked before
// GetAttachState() to guarantee the ActualStateOfWorld is
// up-to-date when it's read.
// See https://github.com/kubernetes/kubernetes/issues/93902
attachState := rc.actualStateOfWorld.GetAttachState(volumeToAttach.VolumeName, volumeToAttach.NodeName)
if attachState == cache.AttachStateAttached {
// Volume/Node exists, touch it to reset detachRequestedTime // Volume/Node exists, touch it to reset detachRequestedTime
if klog.V(5) { if klog.V(5) {
klog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", "")) klog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
@ -235,26 +275,7 @@ func (rc *reconciler) attachDesiredVolumes() {
continue continue
} }
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) { if !util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
// Don't even try to start an operation if there is already one running for the given volume and node.
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
if klog.V(10) {
klog.Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}
} else {
// Don't even try to start an operation if there is already one running for the given volume
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
if klog.V(10) {
klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}
nodes := rc.actualStateOfWorld.GetNodesForAttachedVolume(volumeToAttach.VolumeName) nodes := rc.actualStateOfWorld.GetNodesForAttachedVolume(volumeToAttach.VolumeName)
if len(nodes) > 0 { if len(nodes) > 0 {
if !volumeToAttach.MultiAttachErrorReported { if !volumeToAttach.MultiAttachErrorReported {
@ -263,7 +284,6 @@ func (rc *reconciler) attachDesiredVolumes() {
} }
continue continue
} }
} }
// Volume/Node doesn't exist, spawn a goroutine to attach it // Volume/Node doesn't exist, spawn a goroutine to attach it
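Note: the comments added in this file describe a time-of-check/time-of-use race: attach and detach operations update the ActualStateOfWorld before they are marked complete, so the state has to be re-read after the IsOperationPending check before deciding to attach or detach. A generic sketch of that "check pending, then re-check state" ordering, with hypothetical names and a single lock standing in for the real caches:

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu       sync.Mutex
	attached map[string]bool // volume -> currently attached?
	pending  map[string]bool // volume -> operation in flight?
}

// shouldDetach re-checks the state *after* confirming no operation is pending,
// because a just-finished operation may already have updated the store.
func (s *store) shouldDetach(volume string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.pending[volume] {
		return false // an operation is running; don't pile on
	}
	// Re-read the state under the same lock to avoid acting on stale data.
	return s.attached[volume]
}

func main() {
	s := &store{attached: map[string]bool{"vol-1": true}, pending: map[string]bool{}}
	fmt.Println(s.shouldDetach("vol-1")) // true: attached and no pending op
	s.pending["vol-1"] = true
	fmt.Println(s.shouldDetach("vol-1")) // false: operation already running
}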


@ -167,6 +167,8 @@ func NewManager(numaNodeInfo cputopology.NUMANodeInfo, topologyPolicyName string
} }
func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint { func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint {
m.mutex.Lock()
defer m.mutex.Unlock()
return m.podTopologyHints[podUID][containerName] return m.podTopologyHints[podUID][containerName]
} }
@ -256,10 +258,12 @@ func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitR
} }
klog.Infof("[topologymanager] Topology Affinity for (pod: %v container: %v): %v", pod.UID, container.Name, result) klog.Infof("[topologymanager] Topology Affinity for (pod: %v container: %v): %v", pod.UID, container.Name, result)
m.mutex.Lock()
if m.podTopologyHints[string(pod.UID)] == nil { if m.podTopologyHints[string(pod.UID)] == nil {
m.podTopologyHints[string(pod.UID)] = make(map[string]TopologyHint) m.podTopologyHints[string(pod.UID)] = make(map[string]TopologyHint)
} }
m.podTopologyHints[string(pod.UID)][container.Name] = result m.podTopologyHints[string(pod.UID)][container.Name] = result
m.mutex.Unlock()
err := m.allocateAlignedResources(pod, &container) err := m.allocateAlignedResources(pod, &container)
if err != nil { if err != nil {
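Note: the topology-manager hunks only wrap reads and writes of the podTopologyHints map in the manager's mutex, since GetAffinity can now race with Admit. A minimal sketch of the same locking discipline around a nested map (hypothetical hint type):

package main

import (
	"fmt"
	"sync"
)

type hint struct{ preferred bool }

type manager struct {
	mu    sync.Mutex
	hints map[string]map[string]hint // podUID -> container -> hint
}

// getAffinity reads under the lock; a missing entry yields the zero-value hint.
func (m *manager) getAffinity(podUID, container string) hint {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.hints[podUID][container]
}

// setAffinity writes under the same lock, lazily creating the inner map.
func (m *manager) setAffinity(podUID, container string, h hint) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.hints[podUID] == nil {
		m.hints[podUID] = make(map[string]hint)
	}
	m.hints[podUID][container] = h
}

func main() {
	m := &manager{hints: make(map[string]map[string]hint)}
	m.setAffinity("pod-1", "c1", hint{preferred: true})
	fmt.Println(m.getAffinity("pod-1", "c1"))
}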


@ -22,6 +22,7 @@ import (
"net/http" "net/http"
"path" "path"
"path/filepath" "path/filepath"
"runtime"
"sync" "sync"
"time" "time"
@ -252,6 +253,8 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon
ds.network = network.NewPluginManager(plug) ds.network = network.NewPluginManager(plug)
klog.Infof("Docker cri networking managed by %v", plug.Name()) klog.Infof("Docker cri networking managed by %v", plug.Name())
// skipping cgroup driver checks for Windows
if runtime.GOOS == "linux" {
// NOTE: cgroup driver is only detectable in docker 1.11+ // NOTE: cgroup driver is only detectable in docker 1.11+
cgroupDriver := defaultCgroupDriver cgroupDriver := defaultCgroupDriver
dockerInfo, err := ds.client.Info() dockerInfo, err := ds.client.Info()
@ -270,6 +273,8 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon
} }
klog.Infof("Setting cgroupDriver to %s", cgroupDriver) klog.Infof("Setting cgroupDriver to %s", cgroupDriver)
ds.cgroupDriver = cgroupDriver ds.cgroupDriver = cgroupDriver
}
ds.versionCache = cache.NewObjectCache( ds.versionCache = cache.NewObjectCache(
func() (interface{}, error) { func() (interface{}, error) {
return ds.getDockerVersion() return ds.getDockerVersion()
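Note: the dockershim change gates the existing cgroup-driver detection behind a runtime.GOOS check so it is skipped on Windows; the pattern itself is just a platform guard, roughly:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	cgroupDriver := "cgroupfs" // assumed default for this sketch
	if runtime.GOOS == "linux" {
		// Only Linux nodes detect the cgroup driver from the container runtime.
		cgroupDriver = detectDriver()
	}
	fmt.Println("using cgroup driver:", cgroupDriver)
}

// detectDriver is a hypothetical stand-in for querying the Docker daemon.
func detectDriver() string { return "systemd" }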


@ -48,6 +48,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1" corelisters "k8s.io/client-go/listers/core/v1"
@ -127,6 +128,9 @@ const (
// Max amount of time to wait for the container runtime to come up. // Max amount of time to wait for the container runtime to come up.
maxWaitForContainerRuntime = 30 * time.Second maxWaitForContainerRuntime = 30 * time.Second
// Max amount of time to wait for node list/watch to initially sync
maxWaitForAPIServerSync = 10 * time.Second
// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed. // nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
nodeStatusUpdateRetry = 5 nodeStatusUpdateRetry = 5
@ -136,11 +140,6 @@ const (
// MaxContainerBackOff is the max backoff period, exported for the e2e test // MaxContainerBackOff is the max backoff period, exported for the e2e test
MaxContainerBackOff = 300 * time.Second MaxContainerBackOff = 300 * time.Second
// Capacity of the channel for storing pods to kill. A small number should
// suffice because a goroutine is dedicated to check the channel and does
// not block on anything else.
podKillingChannelCapacity = 50
// Period for performing global cleanup tasks. // Period for performing global cleanup tasks.
housekeepingPeriod = time.Second * 2 housekeepingPeriod = time.Second * 2
@ -519,14 +518,30 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
} }
serviceLister := corelisters.NewServiceLister(serviceIndexer) serviceLister := corelisters.NewServiceLister(serviceIndexer)
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) var nodeHasSynced cache.InformerSynced
var nodeLister corelisters.NodeLister
if kubeDeps.KubeClient != nil { if kubeDeps.KubeClient != nil {
fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector() kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.CoreV1().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector) options.FieldSelector = fields.Set{api.ObjectNameField: string(nodeName)}.String()
r := cache.NewReflector(nodeLW, &v1.Node{}, nodeIndexer, 0) }))
go r.Run(wait.NeverStop) nodeLister = kubeInformers.Core().V1().Nodes().Lister()
nodeHasSynced = func() bool {
if kubeInformers.Core().V1().Nodes().Informer().HasSynced() {
return true
}
klog.Infof("kubelet nodes not sync")
return false
}
kubeInformers.Start(wait.NeverStop)
klog.Info("Kubelet client is not nil")
} else {
// we don't have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }
klog.Info("Kubelet client is nil")
} }
nodeLister := corelisters.NewNodeLister(nodeIndexer)
// TODO: get the real node object of ourself, // TODO: get the real node object of ourself,
// and use the real node name and UID. // and use the real node name and UID.
@ -578,6 +593,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig), dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig),
serviceLister: serviceLister, serviceLister: serviceLister,
nodeLister: nodeLister, nodeLister: nodeLister,
nodeHasSynced: nodeHasSynced,
masterServiceNamespace: masterServiceNamespace, masterServiceNamespace: masterServiceNamespace,
streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration,
recorder: kubeDeps.Recorder, recorder: kubeDeps.Recorder,
@ -843,7 +859,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.podWorkers = newPodWorkers(klet.syncPod, kubeDeps.Recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache) klet.podWorkers = newPodWorkers(klet.syncPod, kubeDeps.Recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache)
klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff) klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity) klet.podKiller = NewPodKiller(klet)
etcHostsPathFunc := func(podUID types.UID) string { return getEtcHostsPath(klet.getPodDir(podUID)) } etcHostsPathFunc := func(podUID types.UID) string { return getEtcHostsPath(klet.getPodDir(podUID)) }
// setup eviction manager // setup eviction manager
@ -967,7 +983,9 @@ type Kubelet struct {
serviceLister serviceLister serviceLister serviceLister
// nodeLister knows how to list nodes // nodeLister knows how to list nodes
nodeLister corelisters.NodeLister nodeLister corelisters.NodeLister
// nodeHasSynced indicates whether nodes have been sync'd at least once.
// Check this before trusting a response from the node lister.
nodeHasSynced cache.InformerSynced
// a list of node labels to register // a list of node labels to register
nodeLabels map[string]string nodeLabels map[string]string
@ -1129,8 +1147,8 @@ type Kubelet struct {
// Container restart Backoff // Container restart Backoff
backOff *flowcontrol.Backoff backOff *flowcontrol.Backoff
// Channel for sending pods to kill. // Pod killer handles pods to be killed
podKillingCh chan *kubecontainer.PodPair podKiller PodKiller
// Information about the ports which are opened by daemons on Node running this Kubelet server. // Information about the ports which are opened by daemons on Node running this Kubelet server.
daemonEndpoints *v1.NodeDaemonEndpoints daemonEndpoints *v1.NodeDaemonEndpoints
@ -1439,7 +1457,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
// Start a goroutine responsible for killing pods (that are not properly // Start a goroutine responsible for killing pods (that are not properly
// handled by pod workers). // handled by pod workers).
go wait.Until(kl.podKiller, 1*time.Second, wait.NeverStop) go wait.Until(kl.podKiller.PerformPodKillingWork, 1*time.Second, wait.NeverStop)
// Start component sync loops. // Start component sync loops.
kl.statusManager.Start() kl.statusManager.Start()
@ -1507,6 +1525,13 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
return nil return nil
} }
// If the pod is a static pod and its mirror pod is still gracefully terminating,
// we do not want to start the new static pod until the old static pod is gracefully terminated.
podFullName := kubecontainer.GetPodFullName(pod)
if kl.podKiller.IsMirrorPodPendingTerminationByPodName(podFullName) {
return fmt.Errorf("pod %q is pending termination", podFullName)
}
// Latency measurements for the main workflow are relative to the // Latency measurements for the main workflow are relative to the
// first time the pod was seen by the API server. // first time the pod was seen by the API server.
var firstSeenTime time.Time var firstSeenTime time.Time
@ -1640,7 +1665,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
// Create Mirror Pod for Static Pod if it doesn't already exist // Create Mirror Pod for Static Pod if it doesn't already exist
if kubetypes.IsStaticPod(pod) { if kubetypes.IsStaticPod(pod) {
podFullName := kubecontainer.GetPodFullName(pod)
deleted := false deleted := false
if mirrorPod != nil { if mirrorPod != nil {
if mirrorPod.DeletionTimestamp != nil || !kl.podManager.IsMirrorPodOf(mirrorPod, pod) { if mirrorPod.DeletionTimestamp != nil || !kl.podManager.IsMirrorPodOf(mirrorPod, pod) {
@ -1764,7 +1788,10 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
} }
podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod} podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}
kl.podKillingCh <- &podPair if _, ok := kl.podManager.GetMirrorPodByPod(pod); ok {
kl.podKiller.MarkMirrorPodPendingTermination(pod)
}
kl.podKiller.KillPod(&podPair)
// TODO: delete the mirror pod here? // TODO: delete the mirror pod here?
// We leave the volume/directory cleanup to the periodic cleanup routine. // We leave the volume/directory cleanup to the periodic cleanup routine.


@ -22,6 +22,7 @@ import (
"io/ioutil" "io/ioutil"
"net" "net"
"path/filepath" "path/filepath"
"time"
cadvisorapiv1 "github.com/google/cadvisor/info/v1" cadvisorapiv1 "github.com/google/cadvisor/info/v1"
"k8s.io/klog" "k8s.io/klog"
@ -30,6 +31,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -232,6 +234,15 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) {
if kl.kubeClient == nil { if kl.kubeClient == nil {
return kl.initialNode(context.TODO()) return kl.initialNode(context.TODO())
} }
// if we have a valid kube client, we wait for initial lister to sync
if !kl.nodeHasSynced() {
err := wait.PollImmediate(time.Second, maxWaitForAPIServerSync, func() (bool, error) {
return kl.nodeHasSynced(), nil
})
if err != nil {
return nil, fmt.Errorf("nodes have not yet been read at least once, cannot construct node object")
}
}
return kl.nodeLister.Get(string(kl.nodeName)) return kl.nodeLister.Get(string(kl.nodeName))
} }
@ -242,7 +253,7 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) {
// zero capacity, and the default labels. // zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) { func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) {
if kl.kubeClient != nil { if kl.kubeClient != nil {
if n, err := kl.nodeLister.Get(string(kl.nodeName)); err == nil { if n, err := kl.GetNode(); err == nil {
return n, nil return n, nil
} }
} }
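Note: GetNode now refuses to trust the node lister until the informer has synced at least once, polling nodeHasSynced for up to maxWaitForAPIServerSync. A dependency-free sketch of that bounded poll, using the standard library in place of k8s.io/apimachinery's wait.PollImmediate:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForSync polls hasSynced once per interval until it returns true or the
// timeout expires, mirroring the poll-with-deadline used before reading the lister.
func waitForSync(hasSynced func() bool, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if hasSynced() {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("node informer has not synced yet")
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	synced := func() bool { return time.Since(start) > 30*time.Millisecond }
	fmt.Println(waitForSync(synced, 10*time.Millisecond, time.Second)) // <nil>
}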


@ -70,6 +70,11 @@ import (
const ( const (
managedHostsHeader = "# Kubernetes-managed hosts file.\n" managedHostsHeader = "# Kubernetes-managed hosts file.\n"
managedHostsHeaderWithHostNetwork = "# Kubernetes-managed hosts file (host network).\n" managedHostsHeaderWithHostNetwork = "# Kubernetes-managed hosts file (host network).\n"
// Capacity of the channel for storing pods to kill. A small number should
// suffice because a goroutine is dedicated to check the channel and does
// not block on anything else.
podKillingChannelCapacity = 50
) )
// Get a list of pods that have data directories. // Get a list of pods that have data directories.
@ -1010,6 +1015,23 @@ func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Po
kl.statusManager.RemoveOrphanedStatuses(podUIDs) kl.statusManager.RemoveOrphanedStatuses(podUIDs)
} }
// deleteOrphanedMirrorPods checks whether the pod killer is done with each orphaned mirror pod.
// If pod killing is done, podManager.DeleteMirrorPod() is called to delete the mirror pod
// from the API server.
func (kl *Kubelet) deleteOrphanedMirrorPods() {
podFullNames := kl.podManager.GetOrphanedMirrorPodNames()
for _, podFullname := range podFullNames {
if !kl.podKiller.IsMirrorPodPendingTerminationByPodName(podFullname) {
_, err := kl.podManager.DeleteMirrorPod(podFullname, nil)
if err != nil {
klog.Errorf("encountered error when deleting mirror pod %q : %v", podFullname, err)
} else {
klog.V(3).Infof("deleted pod %q", podFullname)
}
}
}
}
// HandlePodCleanups performs a series of cleanup work, including terminating // HandlePodCleanups performs a series of cleanup work, including terminating
// pod workers, killing unwanted pods, and removing orphaned volumes/pod // pod workers, killing unwanted pods, and removing orphaned volumes/pod
// directories. // directories.
@ -1061,7 +1083,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
} }
for _, pod := range runningPods { for _, pod := range runningPods {
if _, found := desiredPods[pod.ID]; !found { if _, found := desiredPods[pod.ID]; !found {
kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod} kl.podKiller.KillPod(&kubecontainer.PodPair{APIPod: nil, RunningPod: pod})
} }
} }
@ -1089,24 +1111,112 @@ func (kl *Kubelet) HandlePodCleanups() error {
} }
// Remove any orphaned mirror pods. // Remove any orphaned mirror pods.
kl.podManager.DeleteOrphanedMirrorPods() kl.deleteOrphanedMirrorPods()
// Remove any cgroups in the hierarchy for pods that are no longer running. // Remove any cgroups in the hierarchy for pods that are no longer running.
if kl.cgroupsPerQOS { if kl.cgroupsPerQOS {
kl.cleanupOrphanedPodCgroups(cgroupPods, activePods) pcm := kl.containerManager.NewPodContainerManager()
kl.cleanupOrphanedPodCgroups(pcm, cgroupPods, activePods)
} }
kl.backOff.GC() kl.backOff.GC()
return nil return nil
} }
// podKiller launches a goroutine to kill a pod received from the channel if // PodKiller handles requests for killing pods
type PodKiller interface {
// KillPod receives a pod specifier representing the pod to kill
KillPod(pair *kubecontainer.PodPair)
// PerformPodKillingWork performs the actual pod killing work via calling CRI
// It returns after its Close() func is called and all outstanding pod killing requests are served
PerformPodKillingWork()
// After Close() is called, this pod killer will not accept any more pod killing requests
Close()
// IsMirrorPodPendingTerminationByPodName checks whether the mirror pod for the given full pod name is pending termination
IsMirrorPodPendingTerminationByPodName(podFullname string) bool
// IsMirrorPodPendingTerminationByUID checks whether the mirror pod for the given uid is pending termination
IsMirrorPodPendingTerminationByUID(uid types.UID) bool
// MarkMirrorPodPendingTermination marks the mirror pod entering grace period of termination
MarkMirrorPodPendingTermination(pod *v1.Pod)
}
// podKillerWithChannel is an implementation of PodKiller which receives pod killing requests via channel
type podKillerWithChannel struct {
// Channel for getting pods to kill.
podKillingCh chan *kubecontainer.PodPair
// lock for synchronization between HandlePodCleanups and pod killer
podKillingLock *sync.Mutex
// mirrorPodTerminationMap keeps track of the progress of mirror pod termination
// The key is the UID of the pod and the value is the full name of the pod
mirrorPodTerminationMap map[string]string
// killPod is the func which invokes runtime to kill the pod
killPod func(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error
}
// NewPodKiller returns a functional PodKiller
func NewPodKiller(kl *Kubelet) PodKiller {
podKiller := &podKillerWithChannel{
podKillingCh: make(chan *kubecontainer.PodPair, podKillingChannelCapacity),
podKillingLock: &sync.Mutex{},
mirrorPodTerminationMap: make(map[string]string),
killPod: kl.killPod,
}
return podKiller
}
// IsMirrorPodPendingTerminationByUID checks whether the pod for the given uid is pending termination
func (pk *podKillerWithChannel) IsMirrorPodPendingTerminationByUID(uid types.UID) bool {
pk.podKillingLock.Lock()
defer pk.podKillingLock.Unlock()
_, ok := pk.mirrorPodTerminationMap[string(uid)]
return ok
}
// IsMirrorPodPendingTerminationByPodName checks whether the given pod is in grace period of termination
func (pk *podKillerWithChannel) IsMirrorPodPendingTerminationByPodName(podFullname string) bool {
pk.podKillingLock.Lock()
defer pk.podKillingLock.Unlock()
for _, name := range pk.mirrorPodTerminationMap {
if name == podFullname {
return true
}
}
return false
}
func (pk *podKillerWithChannel) markMirrorPodTerminated(uid string) {
pk.podKillingLock.Lock()
klog.V(4).Infof("marking pod termination %q", uid)
delete(pk.mirrorPodTerminationMap, uid)
pk.podKillingLock.Unlock()
}
// MarkMirrorPodPendingTermination marks the pod entering grace period of termination
func (pk *podKillerWithChannel) MarkMirrorPodPendingTermination(pod *v1.Pod) {
fullname := kubecontainer.GetPodFullName(pod)
klog.V(3).Infof("marking pod pending termination %q", string(pod.UID))
pk.podKillingLock.Lock()
pk.mirrorPodTerminationMap[string(pod.UID)] = fullname
pk.podKillingLock.Unlock()
}
// Close closes the channel through which requests are delivered
func (pk *podKillerWithChannel) Close() {
close(pk.podKillingCh)
}
// KillPod sends pod killing request to the killer
func (pk *podKillerWithChannel) KillPod(pair *kubecontainer.PodPair) {
pk.podKillingCh <- pair
}
// PerformPodKillingWork launches a goroutine to kill a pod received from the channel if
// another goroutine isn't already in action. // another goroutine isn't already in action.
func (kl *Kubelet) podKiller() { func (pk *podKillerWithChannel) PerformPodKillingWork() {
killing := sets.NewString() killing := sets.NewString()
// guard for the killing set // guard for the killing set
lock := sync.Mutex{} lock := sync.Mutex{}
for podPair := range kl.podKillingCh { for podPair := range pk.podKillingCh {
runningPod := podPair.RunningPod runningPod := podPair.RunningPod
apiPod := podPair.APIPod apiPod := podPair.APIPod
@ -1120,13 +1230,14 @@ func (kl *Kubelet) podKiller() {
if !exists { if !exists {
go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) { go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) {
klog.V(2).Infof("Killing unwanted pod %q", runningPod.Name) klog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
err := kl.killPod(apiPod, runningPod, nil, nil) err := pk.killPod(apiPod, runningPod, nil, nil)
if err != nil { if err != nil {
klog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err) klog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
} }
lock.Lock() lock.Lock()
killing.Delete(string(runningPod.ID)) killing.Delete(string(runningPod.ID))
lock.Unlock() lock.Unlock()
pk.markMirrorPodTerminated(string(runningPod.ID))
}(apiPod, runningPod) }(apiPod, runningPod)
} }
} }
@ -1712,13 +1823,12 @@ func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID
// cleanupOrphanedPodCgroups removes cgroups that should no longer exist. // cleanupOrphanedPodCgroups removes cgroups that should no longer exist.
// it reconciles the cached state of cgroupPods with the specified list of runningPods // it reconciles the cached state of cgroupPods with the specified list of runningPods
func (kl *Kubelet) cleanupOrphanedPodCgroups(cgroupPods map[types.UID]cm.CgroupName, activePods []*v1.Pod) { func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupPods map[types.UID]cm.CgroupName, activePods []*v1.Pod) {
// Add all running pods to the set that we want to preserve // Add all running pods to the set that we want to preserve
podSet := sets.NewString() podSet := sets.NewString()
for _, pod := range activePods { for _, pod := range activePods {
podSet.Insert(string(pod.UID)) podSet.Insert(string(pod.UID))
} }
pcm := kl.containerManager.NewPodContainerManager()
// Iterate over all the found pods to verify if they should be running // Iterate over all the found pods to verify if they should be running
for uid, val := range cgroupPods { for uid, val := range cgroupPods {
@ -1727,6 +1837,11 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(cgroupPods map[types.UID]cm.CgroupN
continue continue
} }
// if the pod is within termination grace period, we shouldn't cleanup the underlying cgroup
if kl.podKiller.IsMirrorPodPendingTerminationByUID(uid) {
klog.V(3).Infof("pod %q is pending termination", uid)
continue
}
// If volumes have not been unmounted/detached, do not delete the cgroup // If volumes have not been unmounted/detached, do not delete the cgroup
// so any memory backed volumes don't have their charges propagated to the // so any memory backed volumes don't have their charges propagated to the
// parent croup. If the volumes still exist, reduce the cpu shares for any // parent croup. If the volumes still exist, reduce the cpu shares for any
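Note: the PodKiller refactor keeps the original behaviour — a buffered channel of kill requests drained by one worker that spawns a goroutine per pod and dedupes in-flight kills — and moves it behind an interface so the kubelet can also track mirror pods pending termination. A reduced sketch of that worker pattern (hypothetical kill function, no mirror-pod bookkeeping):

package main

import (
	"fmt"
	"sync"
	"time"
)

type killer struct {
	requests chan string // pod IDs to kill
}

// run drains the channel, skipping pods that already have a kill in flight.
func (k *killer) run(kill func(id string)) {
	inFlight := map[string]bool{}
	var mu sync.Mutex
	var wg sync.WaitGroup
	for id := range k.requests {
		mu.Lock()
		busy := inFlight[id]
		if !busy {
			inFlight[id] = true
		}
		mu.Unlock()
		if busy {
			continue
		}
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			kill(id)
			mu.Lock()
			delete(inFlight, id)
			mu.Unlock()
		}(id)
	}
	wg.Wait()
}

func main() {
	k := &killer{requests: make(chan string, 50)}
	k.requests <- "pod-a"
	k.requests <- "pod-a" // duplicate: skipped while the first kill is still running
	k.requests <- "pod-b"
	close(k.requests)
	k.run(func(id string) { time.Sleep(10 * time.Millisecond); fmt.Println("killed", id) })
}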


@ -77,10 +77,8 @@ type Manager interface {
// this means deleting the mappings related to mirror pods. For non- // this means deleting the mappings related to mirror pods. For non-
// mirror pods, this means deleting from indexes for all non-mirror pods. // mirror pods, this means deleting from indexes for all non-mirror pods.
DeletePod(pod *v1.Pod) DeletePod(pod *v1.Pod)
// DeleteOrphanedMirrorPods deletes all mirror pods which do not have // GetOrphanedMirrorPodNames returns names of orphaned mirror pods
// associated static pods. This method sends deletion requests to the API GetOrphanedMirrorPodNames() []string
// server, but does NOT modify the internal pod storage in basicManager.
DeleteOrphanedMirrorPods()
// TranslatePodUID returns the actual UID of a pod. If the UID belongs to // TranslatePodUID returns the actual UID of a pod. If the UID belongs to
// a mirror pod, returns the UID of its static pod. Otherwise, returns the // a mirror pod, returns the UID of its static pod. Otherwise, returns the
// original UID. // original UID.
@ -323,7 +321,7 @@ func (pm *basicManager) GetUIDTranslations() (podToMirror map[kubetypes.Resolved
return podToMirror, mirrorToPod return podToMirror, mirrorToPod
} }
func (pm *basicManager) getOrphanedMirrorPodNames() []string { func (pm *basicManager) GetOrphanedMirrorPodNames() []string {
pm.lock.RLock() pm.lock.RLock()
defer pm.lock.RUnlock() defer pm.lock.RUnlock()
var podFullNames []string var podFullNames []string
@ -335,13 +333,6 @@ func (pm *basicManager) getOrphanedMirrorPodNames() []string {
return podFullNames return podFullNames
} }
func (pm *basicManager) DeleteOrphanedMirrorPods() {
podFullNames := pm.getOrphanedMirrorPodNames()
for _, podFullName := range podFullNames {
pm.MirrorClient.DeleteMirrorPod(podFullName, nil)
}
}
func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool { func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool {
// Check name and namespace first. // Check name and namespace first.
if pod.Name != mirrorPod.Name || pod.Namespace != mirrorPod.Namespace { if pod.Name != mirrorPod.Name || pod.Namespace != mirrorPod.Namespace {


@ -11,6 +11,7 @@ go_library(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/api/v1/pod:go_default_library", "//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/validation:go_default_library",
"//pkg/controller/volume/scheduling:go_default_library", "//pkg/controller/volume/scheduling:go_default_library",
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library", "//pkg/scheduler/algorithmprovider:go_default_library",


@ -35,6 +35,7 @@ import (
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/klog" "k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod" podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/controller/volume/scheduling" "k8s.io/kubernetes/pkg/controller/volume/scheduling"
kubefeatures "k8s.io/kubernetes/pkg/features" kubefeatures "k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
@ -375,7 +376,7 @@ func (sched *Scheduler) Run(ctx context.Context) {
func (sched *Scheduler) recordSchedulingFailure(prof *profile.Profile, podInfo *framework.PodInfo, err error, reason string, message string) { func (sched *Scheduler) recordSchedulingFailure(prof *profile.Profile, podInfo *framework.PodInfo, err error, reason string, message string) {
sched.Error(podInfo, err) sched.Error(podInfo, err)
pod := podInfo.Pod pod := podInfo.Pod
prof.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", message) prof.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", truncateMessage(message))
if err := sched.podConditionUpdater.update(pod, &v1.PodCondition{ if err := sched.podConditionUpdater.update(pod, &v1.PodCondition{
Type: v1.PodScheduled, Type: v1.PodScheduled,
Status: v1.ConditionFalse, Status: v1.ConditionFalse,
@ -386,6 +387,16 @@ func (sched *Scheduler) recordSchedulingFailure(prof *profile.Profile, podInfo *
} }
} }
// truncateMessage truncates a message if it hits the NoteLengthLimit.
func truncateMessage(message string) string {
max := validation.NoteLengthLimit
if len(message) <= max {
return message
}
suffix := " ..."
return message[:max-len(suffix)] + suffix
}
// preempt tries to create room for a pod that has failed to schedule, by preempting lower priority pods if possible. // preempt tries to create room for a pod that has failed to schedule, by preempting lower priority pods if possible.
// If it succeeds, it adds the name of the node where preemption has happened to the pod spec. // If it succeeds, it adds the name of the node where preemption has happened to the pod spec.
// It returns the node name and an error if any. // It returns the node name and an error if any.
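Note: truncateMessage caps the scheduling-failure event message at the API note length limit, reserving room for a " ..." suffix. A standalone version of the same helper with the limit passed in explicitly (the real code reads it from pkg/apis/core/validation.NoteLengthLimit):

package main

import "fmt"

// truncate shortens msg to at most max bytes, appending " ..." when it cuts.
func truncate(msg string, max int) string {
	if len(msg) <= max {
		return msg
	}
	suffix := " ..."
	return msg[:max-len(suffix)] + suffix
}

func main() {
	fmt.Println(truncate("short message", 1024))
	fmt.Println(len(truncate(string(make([]byte, 5000)), 1024))) // 1024
}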


@ -28,6 +28,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library",


@ -22,6 +22,7 @@ import (
"strings" "strings"
"sync" "sync"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog" "k8s.io/klog"
"k8s.io/utils/exec" "k8s.io/utils/exec"
"k8s.io/utils/mount" "k8s.io/utils/mount"
@ -455,6 +456,7 @@ type VolumePluginMgr struct {
plugins map[string]VolumePlugin plugins map[string]VolumePlugin
prober DynamicPluginProber prober DynamicPluginProber
probedPlugins map[string]VolumePlugin probedPlugins map[string]VolumePlugin
loggedDeprecationWarnings sets.String
Host VolumeHost Host VolumeHost
} }
@ -586,6 +588,7 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu
defer pm.mutex.Unlock() defer pm.mutex.Unlock()
pm.Host = host pm.Host = host
pm.loggedDeprecationWarnings = sets.NewString()
if prober == nil { if prober == nil {
// Use a dummy prober to prevent nil deference. // Use a dummy prober to prevent nil deference.
@ -682,9 +685,7 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
} }
// Issue warning if the matched provider is deprecated // Issue warning if the matched provider is deprecated
if detail, ok := deprecatedVolumeProviders[matches[0].GetPluginName()]; ok { pm.logDeprecation(matches[0].GetPluginName())
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", matches[0].GetPluginName(), detail)
}
return matches[0], nil return matches[0], nil
} }
@ -717,12 +718,20 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
} }
// Issue warning if the matched provider is deprecated // Issue warning if the matched provider is deprecated
if detail, ok := deprecatedVolumeProviders[matches[0].GetPluginName()]; ok { pm.logDeprecation(matches[0].GetPluginName())
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", matches[0].GetPluginName(), detail)
}
return matches[0], nil return matches[0], nil
} }
// logDeprecation logs warning when a deprecated plugin is used.
func (pm *VolumePluginMgr) logDeprecation(plugin string) {
if detail, ok := deprecatedVolumeProviders[plugin]; ok && !pm.loggedDeprecationWarnings.Has(plugin) {
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", plugin, detail)
// Make sure the message is logged only once. It has Warning severity
// and we don't want to spam the log too much.
pm.loggedDeprecationWarnings.Insert(plugin)
}
}
// Check if probedPlugin cache update is required. // Check if probedPlugin cache update is required.
// If it is, initialize all probed plugins and replace the cache with them. // If it is, initialize all probed plugins and replace the cache with them.
func (pm *VolumePluginMgr) refreshProbedPlugins() { func (pm *VolumePluginMgr) refreshProbedPlugins() {
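Note: logDeprecation records which plugin names have already produced the deprecation warning so the message is emitted once per plugin instead of on every lookup. A minimal sketch of that once-per-key logging, using a plain map guarded by a mutex in place of sets.String under the plugin manager's existing lock:

package main

import (
	"fmt"
	"sync"
)

type warner struct {
	mu     sync.Mutex
	logged map[string]bool
}

// warnOnce logs the deprecation message only the first time it sees a plugin.
func (w *warner) warnOnce(plugin, detail string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.logged[plugin] {
		return
	}
	fmt.Printf("WARNING: %s built-in volume provider is now deprecated. %s\n", plugin, detail)
	w.logged[plugin] = true
}

func main() {
	w := &warner{logged: map[string]bool{}}
	w.warnOnce("kubernetes.io/azure-disk", "Use CSI instead.")
	w.warnOnce("kubernetes.io/azure-disk", "Use CSI instead.") // silent the second time
}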


@ -181,11 +181,40 @@ func (az *Cloud) CreateOrUpdateSecurityGroup(service *v1.Service, sg network.Sec
return rerr.Error() return rerr.Error()
} }
func cleanupSubnetInFrontendIPConfigurations(lb *network.LoadBalancer) network.LoadBalancer {
if lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil {
return *lb
}
frontendIPConfigurations := *lb.FrontendIPConfigurations
for i := range frontendIPConfigurations {
config := frontendIPConfigurations[i]
if config.FrontendIPConfigurationPropertiesFormat != nil &&
config.Subnet != nil &&
config.Subnet.ID != nil {
subnet := network.Subnet{
ID: config.Subnet.ID,
}
if config.Subnet.Name != nil {
subnet.Name = config.FrontendIPConfigurationPropertiesFormat.Subnet.Name
}
config.FrontendIPConfigurationPropertiesFormat.Subnet = &subnet
frontendIPConfigurations[i] = config
continue
}
}
lb.FrontendIPConfigurations = &frontendIPConfigurations
return *lb
}
// CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry // CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error { func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {
ctx, cancel := getContextWithCancel() ctx, cancel := getContextWithCancel()
defer cancel() defer cancel()
lb = cleanupSubnetInFrontendIPConfigurations(&lb)
rgName := az.getLoadBalancerResourceGroup() rgName := az.getLoadBalancerResourceGroup()
rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, *lb.Name, lb, to.String(lb.Etag)) rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, *lb.Name, lb, to.String(lb.Etag))
klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name) klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)


@ -1363,10 +1363,15 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
vmssNamesMap[vmSetName] = true vmssNamesMap[vmSetName] = true
} }
vmssUpdaters := make([]func() error, 0, len(vmssNamesMap))
errors := make([]error, 0, len(vmssNamesMap))
for vmssName := range vmssNamesMap { for vmssName := range vmssNamesMap {
vmssName := vmssName
vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault) vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault)
if err != nil { if err != nil {
return err klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get VMSS %s: %v", vmssName, err)
errors = append(errors, err)
continue
} }
// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
@ -1382,11 +1387,15 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
primaryNIC, err := ss.getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName) primaryNIC, err := ss.getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
if err != nil { if err != nil {
return err klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get the primary network interface config of the VMSS %s: %v", vmssName, err)
errors = append(errors, err)
continue
} }
primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC) primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC)
if err != nil { if err != nil {
return err klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to the primary IP config from the VMSS %s's network config : %v", vmssName, err)
errors = append(errors, err)
continue
} }
loadBalancerBackendAddressPools := []compute.SubResource{} loadBalancerBackendAddressPools := []compute.SubResource{}
if primaryIPConfig.LoadBalancerBackendAddressPools != nil { if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
@ -1407,6 +1416,7 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
continue continue
} }
vmssUpdaters = append(vmssUpdaters, func() error {
// Compose a new vmss with added backendPoolID. // Compose a new vmss with added backendPoolID.
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
newVMSS := compute.VirtualMachineScaleSet{ newVMSS := compute.VirtualMachineScaleSet{
@ -1424,9 +1434,21 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID) klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID)
rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS) rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS)
if rerr != nil { if rerr != nil {
klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, err) klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, rerr)
return rerr.Error() return rerr.Error()
} }
return nil
})
}
errs := utilerrors.AggregateGoroutines(vmssUpdaters...)
if errs != nil {
return utilerrors.Flatten(errs)
}
// Fail if there are other errors.
if len(errors) > 0 {
return utilerrors.Flatten(utilerrors.NewAggregate(errors))
} }
return nil return nil
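Note: ensureBackendPoolDeletedFromVMSS now builds one update closure per scale set, runs them through utilerrors.AggregateGoroutines, and also aggregates the lookup errors collected while building the list, instead of returning on the first failure. A simplified sketch of running updaters concurrently and collecting every error with only the standard library (a stand-in for the apimachinery aggregate helpers):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// runAll executes every updater in its own goroutine and returns all failures.
func runAll(updaters []func() error) []error {
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs []error
	)
	for _, u := range updaters {
		u := u
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := u(); err != nil {
				mu.Lock()
				errs = append(errs, err)
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	return errs
}

func main() {
	updaters := []func() error{
		func() error { return nil },
		func() error { return errors.New("vmss-b: update failed") },
	}
	fmt.Println(runAll(updaters))
}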

vendor/modules.txt

@ -1145,7 +1145,7 @@ gopkg.in/square/go-jose.v2/jwt
gopkg.in/warnings.v0 gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.2.8 # gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v2 gopkg.in/yaml.v2
# k8s.io/api v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.15-k3s1 # k8s.io/api v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1
k8s.io/api/admission/v1 k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1 k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1
@ -1189,7 +1189,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1 k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1 k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.15-k3s1 # k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@ -1229,7 +1229,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.15-k3s1 # k8s.io/apimachinery v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1
k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/meta
@ -1291,7 +1291,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.15-k3s1 # k8s.io/apiserver v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1
k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/configuration
k8s.io/apiserver/pkg/admission/initializer k8s.io/apiserver/pkg/admission/initializer
@ -1421,7 +1421,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.15-k3s1 # k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1
k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@ -1434,7 +1434,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.15-k3s1 # k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1
k8s.io/client-go/discovery k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached k8s.io/client-go/discovery/cached
k8s.io/client-go/discovery/cached/disk k8s.io/client-go/discovery/cached/disk
@ -1668,7 +1668,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.15-k3s1 # k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1
k8s.io/cloud-provider k8s.io/cloud-provider
k8s.io/cloud-provider/api k8s.io/cloud-provider/api
k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/node/helpers
@ -1676,13 +1676,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.15-k3s1 # k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1
k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.15-k3s1
+# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -1697,7 +1697,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.15-k3s1
+# k8s.io/component-base v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
 k8s.io/component-base/codec
@@ -1715,10 +1715,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
 k8s.io/component-base/metrics/testutil
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/cri-api v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.15-k3s1
+# k8s.io/cri-api v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.15-k3s1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@@ -1733,7 +1733,7 @@ k8s.io/gengo/types
 k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.15-k3s1
+# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -1761,7 +1761,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.15-k3s1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
 k8s.io/kube-openapi/pkg/aggregator
@@ -1772,14 +1772,14 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.15-k3s1
+# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.15-k3s1
+# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1alpha1
 k8s.io/kube-scheduler/config/v1alpha2
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.15-k3s1
+# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
 k8s.io/kubectl/pkg/cmd/annotate
@@ -1854,11 +1854,11 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.15-k3s1
+# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
 k8s.io/kubelet/pkg/apis/pluginregistration/v1
-# k8s.io/kubernetes v1.18.15 => github.com/k3s-io/kubernetes v1.18.15-k3s1
+# k8s.io/kubernetes v1.18.16 => github.com/k3s-io/kubernetes v1.18.16-k3s1
 k8s.io/kubernetes/cmd/cloud-controller-manager/app
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@@ -2601,7 +2601,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
 k8s.io/kubernetes/third_party/forked/ipvs
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.15-k3s1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -2632,7 +2632,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.15-k3s1
+# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2