mirror of https://github.com/k3s-io/k3s
Update to v1.18.16 (#2950)
* Update to v1.18.16

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

Branch: pull/3084/head    Tag: v1.18.16+k3s1
parent b8f90a031a
commit 8c7dd1395f

go.mod (52 changed lines)
@@ -33,31 +33,31 @@ replace (
	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
	k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.15-k3s1
	k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.15-k3s1
	k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.15-k3s1
	k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.15-k3s1
	k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.15-k3s1
	k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.15-k3s1
	k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.15-k3s1
	k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.15-k3s1
	k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.15-k3s1
	k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.15-k3s1
	k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.15-k3s1
	k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.15-k3s1
	k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.15-k3s1
	k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.15-k3s1
	k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.15-k3s1
	k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.15-k3s1
	k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.15-k3s1
	k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.15-k3s1
	k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.18.15-k3s1
	k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.15-k3s1
	k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.15-k3s1
	k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.18.15-k3s1
	k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.15-k3s1
	k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.15-k3s1
	k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.18.15-k3s1
	k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1
	k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1
	k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1
	k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1
	k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1
	k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1
	k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1
	k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1
	k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1
	k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1
	k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1
	k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1
	k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1
	k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1
	k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1
	k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1
	k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1
	k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1
	k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.18.16-k3s1
	k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1
	k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1
	k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.18.16-k3s1
	k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.16-k3s1
	k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.16-k3s1
	k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.18.16-k3s1
	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)

@@ -124,5 +124,5 @@ require (
	k8s.io/component-base v0.0.0
	k8s.io/cri-api v0.0.0
	k8s.io/klog v1.0.0
	k8s.io/kubernetes v1.18.15
	k8s.io/kubernetes v1.18.16
)
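
Every k8s.io module above is redirected to the k3s-io/kubernetes fork at the matching -k3s1 tag, so a patch release bump rewrites all of the replace directives in lockstep with the k8s.io/kubernetes requirement in the require block. A minimal go.mod sketch of that shape, assuming a hypothetical consumer module and showing only two of the pinned modules:

module example.com/k3s-fork-consumer // hypothetical module path

require (
	k8s.io/client-go v0.0.0 // version is supplied by the replace below
	k8s.io/kubernetes v1.18.16
)

replace (
	// Pin the main tree and each staging repo to the same fork tag, so the
	// module graph can never mix an upstream staging repo with the forked tree.
	k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1
	k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.18.16-k3s1
)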
go.sum (86 changed lines)
@ -458,49 +458,49 @@ github.com/k3s-io/cri v1.3.0-k3s.9 h1:1PUJysV7YQElM5JEjnHdOQ2kgwtTiSJ5fpx1j4/Wjz
|
|||
github.com/k3s-io/cri v1.3.0-k3s.9/go.mod h1:fGPUUHMKQik/vIegSe05DtX/m4miovdtvVLqRUFAkK0=
|
||||
github.com/k3s-io/helm-controller v0.8.3 h1:GWxavyMz7Bw2ClxH5okkeOL8o5U6IBK7uauc44SDCjU=
|
||||
github.com/k3s-io/helm-controller v0.8.3/go.mod h1:nZP8FH3KZrNNUf5r+SwwiMR63HS6lxdHdpHijgPfF74=
|
||||
github.com/k3s-io/kubernetes v1.18.15-k3s1 h1:hcQXepHGxTP7G3FyoHFbOjDKni1xq2vWIIoFhlBG83M=
|
||||
github.com/k3s-io/kubernetes v1.18.15-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.15-k3s1 h1:ERXYS6TQ2qlSyPhlxJbv6n/2/aZ0WEu0AIGufSy2afQ=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.15-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.15-k3s1 h1:a3sOwx5FCPW0uwoqqWChUw/IJ9FVL8Zv89/WNaqqCIQ=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.15-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.15-k3s1 h1:12DQjfDbSpRDcbNh6WXL6SiqT4cIR0O1bCAQVucz730=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.15-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.15-k3s1 h1:8Nbmny/CjMkCv/qxsRo2x024xDQMq+k3UFy5R7JeIxQ=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.15-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.15-k3s1 h1:a1OePV28qNA21Tv+FXNL7jsTt1YZhwx9miAeL4AGw78=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.15-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.15-k3s1 h1:mVqozxuZ1SmQPTBNaEWxn7mRO5/WW0LJpDY0yhwBN/c=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.15-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.15-k3s1 h1:HDdj8TJWO0ehGltPqYInSz9j3ZVmTk3kv7INSat6n/k=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.15-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.15-k3s1 h1:dvdp5/pqf97qXZmEE5UCxrA4CZeMqP9OvgehNsgJ9Tg=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.15-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.15-k3s1 h1:K+RmscxNNgyCoOsUxGf3hFK5JWz8ZuPhh6o83B/rIEI=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.15-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.15-k3s1 h1:YP+cvLB8w0n4WbFrgu4mBMl2es2lf2phFC5N9MMs46U=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.15-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.15-k3s1 h1:MvO8O98sFkRtRNe9QnrnPtB4ofkIuFqbZ+VpOWNzu8A=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.15-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.15-k3s1 h1:ue0NXiNu2vf9M6t7lDs3B9EzEpi5TittToIa8oMpnrg=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.15-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.15-k3s1 h1:q2e6GYEnrpN32T8KB2aFLgysnN4RAowEJCu44xmcK8Y=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.15-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.15-k3s1 h1:SiEKqX854ZFJMypiSBZ9ZbKNVhL55XckQd3YUjUrY3Q=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.15-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.15-k3s1 h1:69g241Ox8rJJk3FXsOOL1aO+kiy2I5elZagM0+QZM5w=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.15-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.15-k3s1 h1:Ujtu5t3T3Tnwr62aZYdpLxg/VYyt5aViSbgW/Aj6rzo=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.15-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.15-k3s1 h1:xtxBmtb9TyIYjGHi882xXpCvM/tQw27uqmtwQXeGQK8=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.15-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.15-k3s1 h1:q4gZT6sv7X2mj0tgkIXK5TzItWh4zSGhchJr+NbfeVI=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.15-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.15-k3s1 h1:48r/lsJ3vwhI5uW/PNcFgLSSBKGsVjUNBLUv14Y0dFk=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.15-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.15-k3s1 h1:SEL4F76VARKNIBUmP34CIt6ZFHrr2wtBKVlSV730EtU=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.15-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.15-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
|
||||
github.com/k3s-io/kubernetes v1.18.16-k3s1 h1:VOCBgjlSMQsZhU/Qn3Y0bny3y74tBjovp6m2VclqNbc=
|
||||
github.com/k3s-io/kubernetes v1.18.16-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1 h1:U+tqB0V02DqEl7zblwc0hLJKhmvubnkLGvEh/NVvGRc=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1 h1:iG62cqadChdENW46VQutzG9Oeck5BO7lKsNbudIYwp4=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1 h1:hoIFgutUJrIjo6iOF0MOQXv6zJtD3srbg4Bdn/Gl2qM=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1 h1:v0oEi/fzkt1mqpoPCsmPKFmW8jGS2zItcojQw6WmDq8=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1 h1:16OlDNNaI/ixfPy94CPtk7bZjICaR9Wl4Eqg4t73x8U=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1 h1:bJc5E87t0lMfd6W+vMw7swO3D8ELIWGmkzNCiKTMJD8=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1 h1:eiavvS0HQcpkepHwQE+x7TxjWzNYYHzxXupYFFBodus=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1 h1:7fXjYEQAriIqwuQE39xgZnMjzIRmBC6hPm134mT8LsA=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1 h1:fmow5tJk9U0l7g1Et5Pzz5vj09P6IEKa/fjAXyL5ZQY=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1 h1:TNKQ5XZ11bKFxXomanfTA4DbWz7MqAUPhFF+8T/tqTw=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1 h1:z0SjsT0qnzA1u2HqNtFeeXZer6TDhiY0ov7caLogwng=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1 h1:9ytp165C2ywtg3pZsdy4fyI2nlWDfVkIlWsHj5Z+tNM=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1 h1:3BJ4AzNapPb5XumVEBX53/HAmhnqsuv0GuCKE1sOxEU=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1 h1:+0T2TH6YJGs/VSsXiPfU7ew1BJNSvrFRAw+H66jpMuU=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1 h1:kvIfXmOsTwglK9EW9nhgIThQJZ/K2o75v3XmSlc19cM=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1 h1:fpXQAwgsxQtVzEYXcJJ+I0y2XfmPZTND5+R9jOlL0Ag=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1 h1:ZNU6UhplF3o9bQgFW/kAxgzIYuQyJc5sy8gd7WfuCx0=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1 h1:mEcew1xAHJjG7wGlXE88R5kz9NcIPNZI361WhPDHDIY=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1 h1:9siB+L5MHHZs/IbN9NV43WYkvr9kLMjeFIEErfigf+w=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1 h1:7RC1767UxORogidkHORJTBi8pOjTWx6kD6wAi14oaj0=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
|
||||
github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.16-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
|
||||
github.com/karrick/godirwalk v1.7.5 h1:VbzFqwXwNbAZoA6W5odrLr+hKK197CcENcPh6E/gJ0M=
|
||||
github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
|
|
|
@ -19,6 +19,7 @@ go_library(
|
|||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
|
||||
|
@ -73,6 +74,7 @@ go_test(
|
|||
"//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
|
||||
|
|
|
@@ -34,6 +34,7 @@ import (
	"k8s.io/apimachinery/pkg/labels"
	apitypes "k8s.io/apimachinery/pkg/types"
	apierrors "k8s.io/apimachinery/pkg/util/errors"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	fcboot "k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap"
	"k8s.io/apiserver/pkg/authentication/user"

@@ -221,14 +222,28 @@ func (cfgCtl *configController) initializeConfigController(informerFactory kubei
	}})
}

// used from the unit tests only.
func (cfgCtl *configController) getPriorityLevelState(plName string) *priorityLevelState {
	cfgCtl.lock.Lock()
	defer cfgCtl.lock.Unlock()
	return cfgCtl.priorityLevelStates[plName]
}

func (cfgCtl *configController) Run(stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()

	// Let the config worker stop when we are done
	defer cfgCtl.configQueue.ShutDown()

	klog.Info("Starting API Priority and Fairness config controller")
	if ok := cache.WaitForCacheSync(stopCh, cfgCtl.plInformerSynced, cfgCtl.fsInformerSynced); !ok {
		return fmt.Errorf("Never achieved initial sync")
	}

	klog.Info("Running API Priority and Fairness config worker")
	wait.Until(cfgCtl.runWorker, time.Second, stopCh)
	go wait.Until(cfgCtl.runWorker, time.Second, stopCh)

	<-stopCh
	klog.Info("Shutting down API Priority and Fairness config worker")
	return nil
}
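
The visible change in this hunk is that the config worker is now launched with go wait.Until(...) while Run itself blocks on stopCh and only then logs shutdown, instead of calling wait.Until inline. A self-contained sketch of that start-in-background, block-for-shutdown shape, with invented names rather than the Kubernetes wait/queue APIs:

package main

import (
	"fmt"
	"time"
)

// runWorker stands in for the controller's queue-draining worker pass.
func runWorker(pass int) { fmt.Println("worker pass", pass) }

// run starts the worker loop in the background and then only waits for the
// stop signal, mirroring the "go wait.Until(...); <-stopCh" ordering above.
func run(stop <-chan struct{}) {
	go func() {
		for pass := 1; ; pass++ {
			select {
			case <-stop:
				return
			case <-time.After(200 * time.Millisecond):
				runWorker(pass)
			}
		}
	}()

	<-stop
	fmt.Println("shutting down worker")
}

func main() {
	stop := make(chan struct{})
	go run(stop)
	time.Sleep(700 * time.Millisecond)
	close(stop)
	time.Sleep(100 * time.Millisecond)
}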
@@ -98,21 +98,28 @@ func (cfgCtl *configController) Handle(ctx context.Context, requestDigest Reques
	}
	klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued)
	var executed bool
	idle := req.Finish(func() {
	idle, panicking := true, true
	defer func() {
		klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v, Finish() => panicking=%v idle=%v",
			requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued, panicking, idle)
		if idle {
			cfgCtl.maybeReap(pl.Name)
		}
	}()
	idle = req.Finish(func() {
		if queued {
			metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime))
		}
		metrics.AddDispatch(pl.Name, fs.Name)
		executed = true
		startExecutionTime := time.Now()
		defer func() {
			metrics.ObserveExecutionDuration(pl.Name, fs.Name, time.Since(startExecutionTime))
		}()
		execFn()
		metrics.ObserveExecutionDuration(pl.Name, fs.Name, time.Since(startExecutionTime))
	})
	if queued && !executed {
		metrics.ObserveWaitingDuration(pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime))
	}
	klog.V(7).Infof("Handle(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, isExempt=%v, queued=%v, Finish() => idle=%v", requestDigest, fs.Name, fs.Spec.DistinguisherMethod, pl.Name, isExempt, queued, idle)
	if idle {
		cfgCtl.maybeReap(pl.Name)
	}
	panicking = false
}
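
This hunk and the request.Finish hunk just below move the bookkeeping into deferred functions so it still runs if the wrapped callback panics: Handle computes idle (and reaps the priority level) inside a defer, and Finish performs finishRequestAndDispatchAsMuchAsPossible from a defer wrapped around execFn. A stand-alone sketch of the pattern, with invented names rather than the flowcontrol types:

package main

import "fmt"

// finishSafely runs fn and guarantees the accounting step happens even when
// fn panics, by doing it in a defer around the call (hypothetical names).
func finishSafely(fn func()) (idle bool) {
	func() {
		defer func() {
			idle = true // stand-in for finishRequestAndDispatchAsMuchAsPossible
			fmt.Println("request accounted for")
		}()
		fn()
	}()
	return idle
}

// handle mirrors the Handle hunk: idle starts out true so the deferred
// cleanup still sees a sane value if the callback panics mid-flight.
func handle(fn func()) {
	idle := true
	defer func() {
		fmt.Println("deferred cleanup, idle =", idle)
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	idle = finishSafely(fn)
}

func main() {
	handle(func() { fmt.Println("normal execution") })
	handle(func() { panic("callback panicked") })
}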
@@ -299,8 +299,15 @@ func (req *request) Finish(execFn func()) bool {
	if !exec {
		return idle
	}
	execFn()
	return req.qs.finishRequestAndDispatchAsMuchAsPossible(req)
	func() {
		defer func() {
			idle = req.qs.finishRequestAndDispatchAsMuchAsPossible(req)
		}()

		execFn()
	}()

	return idle
}

func (req *request) wait() (bool, bool) {
@@ -3,8 +3,8 @@ package version
var (
	gitMajor = "1"
	gitMinor = "18"
	gitVersion = "v1.18.15-k3s1"
	gitCommit = "92952488446669be6ff06c46c3e13de3fe395e75"
	gitVersion = "v1.18.16-k3s1"
	gitCommit = "97eee15c8b5c26daccd339bb0ed95a777e1c85a6"
	gitTreeState = "clean"
	buildDate = "2021-01-13T19:13:58Z"
	buildDate = "2021-02-18T20:13:50Z"
)
@@ -3,8 +3,8 @@ package version
var (
	gitMajor = "1"
	gitMinor = "18"
	gitVersion = "v1.18.15-k3s1"
	gitCommit = "92952488446669be6ff06c46c3e13de3fe395e75"
	gitVersion = "v1.18.16-k3s1"
	gitCommit = "97eee15c8b5c26daccd339bb0ed95a777e1c85a6"
	gitTreeState = "clean"
	buildDate = "2021-01-13T19:13:58Z"
	buildDate = "2021-02-18T20:13:50Z"
)
@ -45,6 +45,7 @@ go_test(
|
|||
"azure_file_test.go",
|
||||
"gce_pd_test.go",
|
||||
"in_tree_volume_test.go",
|
||||
"openstack_cinder_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
|
|
|
@ -98,7 +98,7 @@ func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeInlineVolumeToCSI(vol
|
|||
ObjectMeta: metav1.ObjectMeta{
|
||||
// Must be unique per disk as it is used as the unique part of the
|
||||
// staging path
|
||||
Name: fmt.Sprintf("%s-%s", AWSEBSDriverName, ebsSource.VolumeID),
|
||||
Name: fmt.Sprintf("%s-%s", AWSEBSDriverName, volumeHandle),
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
|
|
|
@ -37,11 +37,11 @@ const (
|
|||
volumeIDTemplate = "%s#%s#%s#%s"
|
||||
// Parameter names defined in azure file CSI driver, refer to
|
||||
// https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
|
||||
azureFileShareName = "shareName"
|
||||
|
||||
secretNameTemplate = "azure-storage-account-%s-secret"
|
||||
defaultSecretNamespace = "default"
|
||||
|
||||
shareNameField = "sharename"
|
||||
secretNameField = "secretname"
|
||||
secretNamespaceField = "secretnamespace"
|
||||
secretNameTemplate = "azure-storage-account-%s-secret"
|
||||
defaultSecretNamespace = "default"
|
||||
resourceGroupAnnotation = "kubernetes.io/azure-file-resource-group"
|
||||
)
|
||||
|
||||
|
@ -90,7 +90,7 @@ func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol
|
|||
Driver: AzureFileDriverName,
|
||||
VolumeHandle: fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, ""),
|
||||
ReadOnly: azureSource.ReadOnly,
|
||||
VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
|
||||
VolumeAttributes: map[string]string{shareNameField: azureSource.ShareName},
|
||||
NodeStageSecretRef: &v1.SecretReference{
|
||||
Name: azureSource.SecretName,
|
||||
Namespace: defaultSecretNamespace,
|
||||
|
@ -135,7 +135,7 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
|
|||
Namespace: defaultSecretNamespace,
|
||||
},
|
||||
ReadOnly: azureSource.ReadOnly,
|
||||
VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
|
||||
VolumeAttributes: map[string]string{shareNameField: azureSource.ShareName},
|
||||
VolumeHandle: volumeID,
|
||||
}
|
||||
)
|
||||
|
@ -163,31 +163,48 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
|
|||
ReadOnly: csiSource.ReadOnly,
|
||||
}
|
||||
|
||||
for k, v := range csiSource.VolumeAttributes {
|
||||
switch strings.ToLower(k) {
|
||||
case shareNameField:
|
||||
azureSource.ShareName = v
|
||||
case secretNameField:
|
||||
azureSource.SecretName = v
|
||||
case secretNamespaceField:
|
||||
ns := v
|
||||
azureSource.SecretNamespace = &ns
|
||||
}
|
||||
}
|
||||
|
||||
resourceGroup := ""
|
||||
if csiSource.NodeStageSecretRef != nil && csiSource.NodeStageSecretRef.Name != "" {
|
||||
azureSource.SecretName = csiSource.NodeStageSecretRef.Name
|
||||
azureSource.SecretNamespace = &csiSource.NodeStageSecretRef.Namespace
|
||||
if csiSource.VolumeAttributes != nil {
|
||||
if shareName, ok := csiSource.VolumeAttributes[azureFileShareName]; ok {
|
||||
azureSource.ShareName = shareName
|
||||
}
|
||||
}
|
||||
} else {
|
||||
}
|
||||
if azureSource.ShareName == "" || azureSource.SecretName == "" {
|
||||
rg, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
azureSource.ShareName = fileShareName
|
||||
azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
|
||||
if azureSource.ShareName == "" {
|
||||
azureSource.ShareName = fileShareName
|
||||
}
|
||||
if azureSource.SecretName == "" {
|
||||
azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
|
||||
}
|
||||
resourceGroup = rg
|
||||
}
|
||||
|
||||
if azureSource.SecretNamespace == nil {
|
||||
ns := defaultSecretNamespace
|
||||
azureSource.SecretNamespace = &ns
|
||||
}
|
||||
|
||||
pv.Spec.CSI = nil
|
||||
pv.Spec.AzureFile = azureSource
|
||||
if pv.ObjectMeta.Annotations == nil {
|
||||
pv.ObjectMeta.Annotations = map[string]string{}
|
||||
}
|
||||
if resourceGroup != "" {
|
||||
if pv.ObjectMeta.Annotations == nil {
|
||||
pv.ObjectMeta.Annotations = map[string]string{}
|
||||
}
|
||||
pv.ObjectMeta.Annotations[resourceGroupAnnotation] = resourceGroup
|
||||
}
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@ package plugins
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
|
@ -45,6 +46,30 @@ func NewOpenStackCinderCSITranslator() InTreePlugin {
|
|||
|
||||
// TranslateInTreeStorageClassParametersToCSI translates InTree Cinder storage class parameters to CSI storage class
|
||||
func (t *osCinderCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error) {
|
||||
var (
|
||||
params = map[string]string{}
|
||||
)
|
||||
for k, v := range sc.Parameters {
|
||||
switch strings.ToLower(k) {
|
||||
case fsTypeKey:
|
||||
params[csiFsTypeKey] = v
|
||||
default:
|
||||
// All other parameters are supported by the CSI driver.
|
||||
// This includes also "availability", therefore do not translate it to sc.AllowedTopologies
|
||||
params[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(sc.AllowedTopologies) > 0 {
|
||||
newTopologies, err := translateAllowedTopologies(sc.AllowedTopologies, CinderTopologyKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed translating allowed topologies: %v", err)
|
||||
}
|
||||
sc.AllowedTopologies = newTopologies
|
||||
}
|
||||
|
||||
sc.Parameters = params
|
||||
|
||||
return sc, nil
|
||||
}
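
The translator above copies every storage class parameter through unchanged except the fsType key, which is renamed to its CSI equivalent, and it deliberately leaves "availability" as a plain parameter instead of turning it into allowed topologies. A generic sketch of that rename-or-pass-through loop; the key names are placeholders rather than the driver's documented parameters:

package main

import (
	"fmt"
	"strings"
)

// translateParams renames known legacy keys case-insensitively and passes
// every other key through untouched, like the switch statement above.
func translateParams(in map[string]string) map[string]string {
	renames := map[string]string{
		"fstype": "csi.storage.k8s.io/fstype", // assumed CSI key name
	}
	out := make(map[string]string, len(in))
	for k, v := range in {
		if csiKey, ok := renames[strings.ToLower(k)]; ok {
			out[csiKey] = v
			continue
		}
		out[k] = v // unknown keys are assumed to be understood by the CSI driver
	}
	return out
}

func main() {
	fmt.Println(translateParams(map[string]string{"FSType": "ext4", "availability": "nova"}))
}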
|
||||
|
||||
|
|
|
@ -41,7 +41,10 @@ go_library(
|
|||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["available_controller_test.go"],
|
||||
srcs = [
|
||||
"available_controller_test.go",
|
||||
"metrics_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
|
@ -50,6 +53,7 @@ go_test(
|
|||
"//staging/src/k8s.io/client-go/testing:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
"//staging/src/k8s.io/component-base/metrics/testutil:go_default_library",
|
||||
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1:go_default_library",
|
||||
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake:go_default_library",
|
||||
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1:go_default_library",
|
||||
|
|
|
@ -42,6 +42,7 @@ import (
|
|||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/transport"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
"k8s.io/klog"
|
||||
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
|
||||
apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper"
|
||||
|
@ -51,6 +52,9 @@ import (
|
|||
"k8s.io/kube-aggregator/pkg/controllers"
|
||||
)
|
||||
|
||||
// making sure we only register metrics once into legacy registry
|
||||
var registerIntoLegacyRegistryOnce sync.Once
|
||||
|
||||
// ServiceResolver knows how to convert a service reference into an actual location.
|
||||
type ServiceResolver interface {
|
||||
ResolveEndpoint(namespace, name string, port int32) (*url.URL, error)
|
||||
|
@ -81,6 +85,9 @@ type AvailableConditionController struct {
|
|||
cache map[string]map[string][]string
|
||||
// this lock protects operations on the above cache
|
||||
cacheLock sync.RWMutex
|
||||
|
||||
// metrics registered into legacy registry
|
||||
metrics *availabilityMetrics
|
||||
}
|
||||
|
||||
// NewAvailableConditionController returns a new AvailableConditionController.
|
||||
|
@ -110,6 +117,7 @@ func NewAvailableConditionController(
|
|||
// the maximum disruption time to a minimum, but it does prevent hot loops.
|
||||
workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second),
|
||||
"AvailableConditionController"),
|
||||
metrics: newAvailabilityMetrics(),
|
||||
}
|
||||
|
||||
// if a particular transport was specified, use that otherwise build one
|
||||
|
@ -171,12 +179,21 @@ func NewAvailableConditionController(
|
|||
|
||||
c.syncFn = c.sync
|
||||
|
||||
// TODO: decouple from legacyregistry
|
||||
registerIntoLegacyRegistryOnce.Do(func() {
|
||||
err = c.metrics.Register(legacyregistry.Register, legacyregistry.CustomRegister)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *AvailableConditionController) sync(key string) error {
|
||||
originalAPIService, err := c.apiServiceLister.Get(key)
|
||||
if apierrors.IsNotFound(err) {
|
||||
c.metrics.ForgetAPIService(key)
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
|
@ -194,7 +211,7 @@ func (c *AvailableConditionController) sync(key string) error {
|
|||
// local API services are always considered available
|
||||
if apiService.Spec.Service == nil {
|
||||
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, apiregistrationv1apihelper.NewLocalAvailableAPIServiceCondition())
|
||||
_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
|
||||
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -204,14 +221,14 @@ func (c *AvailableConditionController) sync(key string) error {
|
|||
availableCondition.Reason = "ServiceNotFound"
|
||||
availableCondition.Message = fmt.Sprintf("service/%s in %q is not present", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace)
|
||||
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
|
||||
_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
|
||||
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
|
||||
return err
|
||||
} else if err != nil {
|
||||
availableCondition.Status = apiregistrationv1.ConditionUnknown
|
||||
availableCondition.Reason = "ServiceAccessError"
|
||||
availableCondition.Message = fmt.Sprintf("service/%s in %q cannot be checked due to: %v", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, err)
|
||||
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
|
||||
_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
|
||||
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -232,7 +249,7 @@ func (c *AvailableConditionController) sync(key string) error {
|
|||
availableCondition.Reason = "ServicePortError"
|
||||
availableCondition.Message = fmt.Sprintf("service/%s in %q is not listening on port %d", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, *apiService.Spec.Service.Port)
|
||||
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
|
||||
_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
|
||||
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -242,14 +259,14 @@ func (c *AvailableConditionController) sync(key string) error {
|
|||
availableCondition.Reason = "EndpointsNotFound"
|
||||
availableCondition.Message = fmt.Sprintf("cannot find endpoints for service/%s in %q", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace)
|
||||
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
|
||||
_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
|
||||
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
|
||||
return err
|
||||
} else if err != nil {
|
||||
availableCondition.Status = apiregistrationv1.ConditionUnknown
|
||||
availableCondition.Reason = "EndpointsAccessError"
|
||||
availableCondition.Message = fmt.Sprintf("service/%s in %q cannot be checked due to: %v", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, err)
|
||||
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
|
||||
_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
|
||||
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
|
||||
return err
|
||||
}
|
||||
hasActiveEndpoints := false
|
||||
|
@ -270,7 +287,7 @@ func (c *AvailableConditionController) sync(key string) error {
|
|||
availableCondition.Reason = "MissingEndpoints"
|
||||
availableCondition.Message = fmt.Sprintf("endpoints for service/%s in %q have no addresses with port name %q", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, portName)
|
||||
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
|
||||
_, err := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
|
||||
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -343,7 +360,7 @@ func (c *AvailableConditionController) sync(key string) error {
|
|||
availableCondition.Reason = "FailedDiscoveryCheck"
|
||||
availableCondition.Message = lastError.Error()
|
||||
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
|
||||
_, updateErr := updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
|
||||
_, updateErr := c.updateAPIServiceStatus(originalAPIService, apiService)
|
||||
if updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
|
@ -356,26 +373,26 @@ func (c *AvailableConditionController) sync(key string) error {
|
|||
availableCondition.Reason = "Passed"
|
||||
availableCondition.Message = "all checks passed"
|
||||
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
|
||||
_, err = updateAPIServiceStatus(c.apiServiceClient, originalAPIService, apiService)
|
||||
_, err = c.updateAPIServiceStatus(originalAPIService, apiService)
|
||||
return err
|
||||
}
|
||||
|
||||
// updateAPIServiceStatus only issues an update if a change is detected. We have a tight resync loop to quickly detect dead
|
||||
// apiservices. Doing that means we don't want to quickly issue no-op updates.
|
||||
func updateAPIServiceStatus(client apiregistrationclient.APIServicesGetter, originalAPIService, newAPIService *apiregistrationv1.APIService) (*apiregistrationv1.APIService, error) {
|
||||
func (c *AvailableConditionController) updateAPIServiceStatus(originalAPIService, newAPIService *apiregistrationv1.APIService) (*apiregistrationv1.APIService, error) {
|
||||
// update this metric on every sync operation to reflect the actual state
|
||||
setUnavailableGauge(newAPIService)
|
||||
c.setUnavailableGauge(newAPIService)
|
||||
|
||||
if equality.Semantic.DeepEqual(originalAPIService.Status, newAPIService.Status) {
|
||||
return newAPIService, nil
|
||||
}
|
||||
|
||||
newAPIService, err := client.APIServices().UpdateStatus(context.TODO(), newAPIService, metav1.UpdateOptions{})
|
||||
newAPIService, err := c.apiServiceClient.APIServices().UpdateStatus(context.TODO(), newAPIService, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
setUnavailableCounter(originalAPIService, newAPIService)
|
||||
c.setUnavailableCounter(originalAPIService, newAPIService)
|
||||
return newAPIService, nil
|
||||
}
|
||||
|
||||
|
@ -560,17 +577,17 @@ func (c *AvailableConditionController) deleteEndpoints(obj interface{}) {
|
|||
}
|
||||
|
||||
// setUnavailableGauge set the metrics so that it reflect the current state base on availability of the given service
|
||||
func setUnavailableGauge(newAPIService *apiregistrationv1.APIService) {
|
||||
func (c *AvailableConditionController) setUnavailableGauge(newAPIService *apiregistrationv1.APIService) {
|
||||
if apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available) {
|
||||
unavailableGauge.WithLabelValues(newAPIService.Name).Set(0.0)
|
||||
c.metrics.SetAPIServiceAvailable(newAPIService.Name)
|
||||
return
|
||||
}
|
||||
|
||||
unavailableGauge.WithLabelValues(newAPIService.Name).Set(1.0)
|
||||
c.metrics.SetAPIServiceUnavailable(newAPIService.Name)
|
||||
}
|
||||
|
||||
// setUnavailableCounter increases the metrics only if the given service is unavailable and its APIServiceCondition has changed
|
||||
func setUnavailableCounter(originalAPIService, newAPIService *apiregistrationv1.APIService) {
|
||||
func (c *AvailableConditionController) setUnavailableCounter(originalAPIService, newAPIService *apiregistrationv1.APIService) {
|
||||
wasAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(originalAPIService, apiregistrationv1.Available)
|
||||
isAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available)
|
||||
statusChanged := isAvailable != wasAvailable
|
||||
|
@ -580,6 +597,6 @@ func setUnavailableCounter(originalAPIService, newAPIService *apiregistrationv1.
|
|||
if newCondition := apiregistrationv1apihelper.GetAPIServiceConditionByType(newAPIService, apiregistrationv1.Available); newCondition != nil {
|
||||
reason = newCondition.Reason
|
||||
}
|
||||
unavailableCounter.WithLabelValues(newAPIService.Name, reason).Inc()
|
||||
c.metrics.UnavailableCounter(newAPIService.Name, reason).Inc()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,8 +17,9 @@ limitations under the License.
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"k8s.io/component-base/metrics"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
)
|
||||
|
||||
/*
|
||||
|
@ -30,25 +31,120 @@ import (
|
|||
* the metric stability policy.
|
||||
*/
|
||||
var (
|
||||
unavailableCounter = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Name: "aggregator_unavailable_apiservice_count",
|
||||
Help: "Counter of APIServices which are marked as unavailable broken down by APIService name and reason.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"name", "reason"},
|
||||
)
|
||||
unavailableGauge = metrics.NewGaugeVec(
|
||||
&metrics.GaugeOpts{
|
||||
Name: "aggregator_unavailable_apiservice",
|
||||
Help: "Gauge of APIServices which are marked as unavailable broken down by APIService name.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
unavailableGaugeDesc = metrics.NewDesc(
|
||||
"aggregator_unavailable_apiservice",
|
||||
"Gauge of APIServices which are marked as unavailable broken down by APIService name.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
metrics.ALPHA,
|
||||
"",
|
||||
)
|
||||
)
|
||||
|
||||
func init() {
|
||||
legacyregistry.MustRegister(unavailableCounter)
|
||||
legacyregistry.MustRegister(unavailableGauge)
|
||||
type availabilityMetrics struct {
|
||||
unavailableCounter *metrics.CounterVec
|
||||
|
||||
*availabilityCollector
|
||||
}
|
||||
|
||||
func newAvailabilityMetrics() *availabilityMetrics {
|
||||
return &availabilityMetrics{
|
||||
unavailableCounter: metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Name: "aggregator_unavailable_apiservice_total",
|
||||
Help: "Counter of APIServices which are marked as unavailable broken down by APIService name and reason.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"name", "reason"},
|
||||
),
|
||||
availabilityCollector: newAvailabilityCollector(),
|
||||
}
|
||||
}
|
||||
|
||||
// Register registers apiservice availability metrics.
|
||||
func (m *availabilityMetrics) Register(
|
||||
registrationFunc func(metrics.Registerable) error,
|
||||
customRegistrationFunc func(metrics.StableCollector) error,
|
||||
) error {
|
||||
err := registrationFunc(m.unavailableCounter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = customRegistrationFunc(m.availabilityCollector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnavailableCounter returns a counter to track apiservices marked as unavailable.
|
||||
func (m *availabilityMetrics) UnavailableCounter(apiServiceName, reason string) metrics.CounterMetric {
|
||||
return m.unavailableCounter.WithLabelValues(apiServiceName, reason)
|
||||
}
|
||||
|
||||
type availabilityCollector struct {
|
||||
metrics.BaseStableCollector
|
||||
|
||||
mtx sync.RWMutex
|
||||
availabilities map[string]bool
|
||||
}
|
||||
|
||||
// Check if apiServiceStatusCollector implements necessary interface.
|
||||
var _ metrics.StableCollector = &availabilityCollector{}
|
||||
|
||||
func newAvailabilityCollector() *availabilityCollector {
|
||||
return &availabilityCollector{
|
||||
availabilities: make(map[string]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// DescribeWithStability implements the metrics.StableCollector interface.
|
||||
func (c *availabilityCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
|
||||
ch <- unavailableGaugeDesc
|
||||
}
|
||||
|
||||
// CollectWithStability implements the metrics.StableCollector interface.
|
||||
func (c *availabilityCollector) CollectWithStability(ch chan<- metrics.Metric) {
|
||||
c.mtx.RLock()
|
||||
defer c.mtx.RUnlock()
|
||||
|
||||
for apiServiceName, isAvailable := range c.availabilities {
|
||||
gaugeValue := 1.0
|
||||
if isAvailable {
|
||||
gaugeValue = 0.0
|
||||
}
|
||||
ch <- metrics.NewLazyConstMetric(
|
||||
unavailableGaugeDesc,
|
||||
metrics.GaugeValue,
|
||||
gaugeValue,
|
||||
apiServiceName,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// SetAPIServiceAvailable sets the given apiservice availability gauge to available.
|
||||
func (c *availabilityCollector) SetAPIServiceAvailable(apiServiceKey string) {
|
||||
c.setAPIServiceAvailability(apiServiceKey, true)
|
||||
}
|
||||
|
||||
// SetAPIServiceUnavailable sets the given apiservice availability gauge to unavailable.
|
||||
func (c *availabilityCollector) SetAPIServiceUnavailable(apiServiceKey string) {
|
||||
c.setAPIServiceAvailability(apiServiceKey, false)
|
||||
}
|
||||
|
||||
func (c *availabilityCollector) setAPIServiceAvailability(apiServiceKey string, availability bool) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
c.availabilities[apiServiceKey] = availability
|
||||
}
|
||||
|
||||
// ForgetAPIService removes the availability gauge of the given apiservice.
|
||||
func (c *availabilityCollector) ForgetAPIService(apiServiceKey string) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
delete(c.availabilities, apiServiceKey)
|
||||
}
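
The aggregator's availability metrics move from package-level init() registration into a per-controller availabilityMetrics struct, and the controller registers them through a sync.Once so that building the controller repeatedly (for example in tests) does not attempt a duplicate registration. A reduced sketch of that register-once shape, using a toy registry instead of component-base/metrics:

package main

import (
	"fmt"
	"sync"
)

// registry is a stand-in for legacyregistry; registering a name twice fails.
type registry struct{ seen map[string]bool }

func (r *registry) register(name string) error {
	if r.seen[name] {
		return fmt.Errorf("metric %q already registered", name)
	}
	r.seen[name] = true
	return nil
}

var registerOnce sync.Once

// newController registers its metrics at most once per process, mirroring
// registerIntoLegacyRegistryOnce.Do in the controller constructor above.
func newController(r *registry) error {
	var err error
	registerOnce.Do(func() {
		err = r.register("aggregator_unavailable_apiservice_total")
	})
	return err
}

func main() {
	r := &registry{seen: map[string]bool{}}
	fmt.Println(newController(r)) // <nil>
	fmt.Println(newController(r)) // <nil>: registration is skipped, not repeated
}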
@@ -471,12 +471,12 @@ func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionB
// IMPORTANT NOTE : the returned pods should NOT be modified.
func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {
	sel, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
	if sel.Empty() {
		return []*v1.Pod{}, nil
	}
	if err != nil {
		return []*v1.Pod{}, err
	}
	if sel.Empty() {
		return []*v1.Pod{}, nil
	}
	pods, err := dc.podLister.Pods(pdb.Namespace).List(sel)
	if err != nil {
		return []*v1.Pod{}, err
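
The only change in this hunk is ordering: the selector conversion error is checked before sel.Empty() is called, because the returned selector is not safe to use when LabelSelectorAsSelector fails. A minimal illustration of that rule with generic types, not the controller's own:

package main

import (
	"errors"
	"fmt"
)

type selector struct{ empty bool }

// parseSelector returns a nil selector together with its error on failure.
func parseSelector(valid bool) (*selector, error) {
	if !valid {
		return nil, errors.New("invalid selector")
	}
	return &selector{}, nil
}

func listPods(valid bool) ([]string, error) {
	sel, err := parseSelector(valid)
	if err != nil { // check the error first; reading sel.empty here would dereference nil
		return nil, err
	}
	if sel.empty {
		return []string{}, nil
	}
	return []string{"pod-a"}, nil
}

func main() {
	fmt.Println(listPods(true))
	fmt.Println(listPods(false))
}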
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go (generated, vendored; 74 changed lines)
|
@ -411,7 +411,11 @@ func (adc *attachDetachController) populateActualStateOfWorld() error {
|
|||
adc.addNodeToDswp(node, types.NodeName(node.Name))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
err = adc.processVolumeAttachments()
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to process volume attachments: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (adc *attachDetachController) getNodeVolumeDevicePath(
|
||||
|
@ -481,7 +485,12 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
|
|||
err)
|
||||
continue
|
||||
}
|
||||
if adc.actualStateOfWorld.IsVolumeAttachedToNode(volumeName, nodeName) {
|
||||
attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
|
||||
if attachState == cache.AttachStateAttached {
|
||||
klog.V(10).Infof("Volume %q is attached to node %q. Marking as attached in ActualStateOfWorld",
|
||||
volumeName,
|
||||
nodeName,
|
||||
)
|
||||
devicePath, err := adc.getNodeVolumeDevicePath(volumeName, nodeName)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to find device path: %v", err)
|
||||
|
@ -695,6 +704,67 @@ func (adc *attachDetachController) processVolumesInUse(
|
|||
}
|
||||
}
|
||||
|
||||
// Process Volume-Attachment objects.
|
||||
// Should be called only after populating attached volumes in the ASW.
|
||||
// For each VA object, this function checks if its present in the ASW.
|
||||
// If not, adds the volume to ASW as an "uncertain" attachment.
|
||||
// In the reconciler, the logic checks if the volume is present in the DSW;
|
||||
// if yes, the reconciler will attempt attach on the volume;
|
||||
// if not (could be a dangling attachment), the reconciler will detach this volume.
|
||||
func (adc *attachDetachController) processVolumeAttachments() error {
|
||||
vas, err := adc.volumeAttachmentLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
klog.Errorf("failed to list VolumeAttachment objects: %v", err)
|
||||
return err
|
||||
}
|
||||
for _, va := range vas {
|
||||
nodeName := types.NodeName(va.Spec.NodeName)
|
||||
pvName := va.Spec.Source.PersistentVolumeName
|
||||
if pvName == nil {
|
||||
// Currently VA objects are created for CSI volumes only. nil pvName is unexpected, generate a warning
|
||||
klog.Warningf("Skipping the va as its pvName is nil, va.Name: %q, nodeName: %q",
|
||||
va.Name, nodeName)
|
||||
continue
|
||||
}
|
||||
pv, err := adc.pvLister.Get(*pvName)
|
||||
if err != nil {
|
||||
klog.Errorf("Unable to lookup pv object for: %q, err: %v", *pvName, err)
|
||||
continue
|
||||
}
|
||||
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
|
||||
plugin, err := adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
|
||||
if err != nil || plugin == nil {
|
||||
// Currently VA objects are created for CSI volumes only. nil plugin is unexpected, generate a warning
|
||||
klog.Warningf(
|
||||
"Skipping processing the volume %q on nodeName: %q, no attacher interface found. err=%v",
|
||||
*pvName,
|
||||
nodeName,
|
||||
err)
|
||||
continue
|
||||
}
|
||||
volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
|
||||
if err != nil {
|
||||
klog.Errorf(
|
||||
"Failed to find unique name for volume:%q, va.Name:%q, nodeName:%q: %v",
|
||||
*pvName,
|
||||
va.Name,
|
||||
nodeName,
|
||||
err)
|
||||
continue
|
||||
}
|
||||
attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
|
||||
if attachState == cache.AttachStateDetached {
|
||||
klog.V(1).Infof("Marking volume attachment as uncertain as volume:%q (%q) is not attached (%v)",
|
||||
volumeName, nodeName, attachState)
|
||||
err = adc.actualStateOfWorld.MarkVolumeAsUncertain(volumeName, volumeSpec, nodeName)
|
||||
if err != nil {
|
||||
klog.Errorf("MarkVolumeAsUncertain fail to add the volume %q (%q) to ASW. err: %s", volumeName, nodeName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ volume.VolumeHost = &attachDetachController{}
|
||||
var _ volume.AttachDetachVolumeHost = &attachDetachController{}
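
processVolumeAttachments above back-fills the actual state of world from existing VolumeAttachment objects at controller start: any attachment reported there that is not already recorded as attached is added as "uncertain", and the reconciler later re-attaches it if it is still desired or detaches it if it is dangling. A stripped-down sketch of that back-fill step over stand-in types, not the controller's cache package:

package main

import "fmt"

type attachState int

const (
	attached attachState = iota
	uncertain
	detached
)

// world is a toy actual-state-of-world keyed by "volume/node".
type world map[string]attachState

// backfill marks externally reported attachments that are not confirmed in
// the local state as uncertain, the same shape as processVolumeAttachments.
func backfill(w world, reported []string) {
	for _, key := range reported {
		if state, ok := w[key]; ok && state == attached {
			continue // already confirmed attached, nothing to do
		}
		w[key] = uncertain // the reconciler will attach or detach it later
	}
}

func main() {
	w := world{"vol-1/node-a": attached}
	backfill(w, []string{"vol-1/node-a", "vol-2/node-b"})
	fmt.Println(w)
}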
|
||||
|
||||
|
|
|
@ -96,10 +96,13 @@ type ActualStateOfWorld interface {
|
|||
// nodes, the volume is also deleted.
|
||||
DeleteVolumeNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
|
||||
|
||||
// IsVolumeAttachedToNode returns true if the specified volume/node combo exists
|
||||
// in the underlying store indicating the specified volume is attached to
|
||||
// the specified node.
|
||||
IsVolumeAttachedToNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool
|
||||
// GetAttachState returns the attach state for the given volume-node
|
||||
// combination.
|
||||
// Returns AttachStateAttached if the specified volume/node combo exists in
|
||||
// the underlying store indicating the specified volume is attached to the
|
||||
// specified node, AttachStateDetached if the combo does not exist, or
|
||||
// AttachStateUncertain if the attached state is marked as uncertain.
|
||||
GetAttachState(volumeName v1.UniqueVolumeName, nodeName types.NodeName) AttachState
|
||||
|
||||
// GetAttachedVolumes generates and returns a list of volumes/node pairs
|
||||
// reflecting which volumes might attached to which nodes based on the
|
||||
|
@ -153,6 +156,31 @@ type AttachedVolume struct {
|
|||
DetachRequestedTime time.Time
|
||||
}
|
||||
|
||||
// AttachState represents the attach state of a volume to a node known to the
|
||||
// Actual State of World.
|
||||
// This type is used as external representation of attach state (specifically
|
||||
// as the return type of GetAttachState only); the state is represented
|
||||
// differently in the internal cache implementation.
|
||||
type AttachState int
|
||||
|
||||
const (
|
||||
// AttachStateAttached represents the state in which the volume is attached to
|
||||
// the node.
|
||||
AttachStateAttached AttachState = iota
|
||||
|
||||
// AttachStateUncertain represents the state in which the Actual State of World
|
||||
// does not know whether the volume is attached to the node.
|
||||
AttachStateUncertain
|
||||
|
||||
// AttachStateDetached represents the state in which the volume is not
|
||||
// attached to the node.
|
||||
AttachStateDetached
|
||||
)
|
||||
|
||||
func (s AttachState) String() string {
|
||||
return []string{"Attached", "Uncertain", "Detached"}[s]
|
||||
}
|
||||
|
||||
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
|
||||
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
|
||||
return &actualStateOfWorld{
|
||||
|
@ -530,19 +558,22 @@ func (asw *actualStateOfWorld) DeleteVolumeNode(
|
|||
asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) IsVolumeAttachedToNode(
|
||||
volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool {
|
||||
func (asw *actualStateOfWorld) GetAttachState(
|
||||
volumeName v1.UniqueVolumeName, nodeName types.NodeName) AttachState {
|
||||
asw.RLock()
|
||||
defer asw.RUnlock()
|
||||
|
||||
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
|
||||
if volumeExists {
|
||||
if node, nodeExists := volumeObj.nodesAttachedTo[nodeName]; nodeExists {
|
||||
return node.attachedConfirmed
|
||||
if node.attachedConfirmed {
|
||||
return AttachStateAttached
|
||||
}
|
||||
return AttachStateUncertain
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
return AttachStateDetached
|
||||
}
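
GetAttachState replaces the boolean IsVolumeAttachedToNode with the three-valued AttachState introduced above, so callers can tell a confirmed attachment apart from an uncertain one and from no record at all. A small sketch of how a caller branches on it; the actions are paraphrased from the reconciler, and the types here are local copies rather than the controller cache API:

package main

import "fmt"

type AttachState int

const (
	AttachStateAttached AttachState = iota
	AttachStateUncertain
	AttachStateDetached
)

func (s AttachState) String() string {
	return []string{"Attached", "Uncertain", "Detached"}[s]
}

// decide shows the three-way branch a caller gets instead of a bare bool.
func decide(s AttachState) string {
	switch s {
	case AttachStateAttached:
		return "touch the volume to reset its detach timer"
	case AttachStateUncertain:
		return "issue attach again to confirm the attachment"
	default:
		return "volume is not attached; safe to start a fresh attach"
	}
}

func main() {
	for _, s := range []AttachState{AttachStateAttached, AttachStateUncertain, AttachStateDetached} {
		fmt.Printf("%s: %s\n", s, decide(s))
	}
}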
|
||||
|
||||
func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
|
||||
|
|
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go (generated, vendored; 64 changed lines)
|
@ -142,6 +142,7 @@ func (rc *reconciler) reconcile() {
|
|||
for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
|
||||
if !rc.desiredStateOfWorld.VolumeExists(
|
||||
attachedVolume.VolumeName, attachedVolume.NodeName) {
|
||||
|
||||
// Check whether there already exist an operation pending, and don't even
|
||||
// try to start an operation if there is already one running.
|
||||
// This check must be done before we do any other checks, as otherwise the other checks
|
||||
|
@ -161,6 +162,21 @@ func (rc *reconciler) reconcile() {
|
|||
}
|
||||
}
|
||||
|
||||
// Because the detach operation updates the ActualStateOfWorld before
|
||||
// marking itself complete, it's possible for the volume to be removed
|
||||
// from the ActualStateOfWorld between the GetAttachedVolumes() check
|
||||
// and the IsOperationPending() check above.
|
||||
// Check the ActualStateOfWorld again to avoid issuing an unnecessary
|
||||
// detach.
|
||||
// See https://github.com/kubernetes/kubernetes/issues/93902
|
||||
attachState := rc.actualStateOfWorld.GetAttachState(attachedVolume.VolumeName, attachedVolume.NodeName)
|
||||
if attachState == cache.AttachStateDetached {
|
||||
if klog.V(5) {
|
||||
klog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached--skipping", ""))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Set the detach request time
|
||||
elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
|
||||
if err != nil {
|
||||
|
@ -226,7 +242,31 @@ func (rc *reconciler) reconcile() {
|
|||
func (rc *reconciler) attachDesiredVolumes() {
|
||||
// Ensure volumes that should be attached are attached.
|
||||
for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
|
||||
if rc.actualStateOfWorld.IsVolumeAttachedToNode(volumeToAttach.VolumeName, volumeToAttach.NodeName) {
|
||||
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
|
||||
// Don't even try to start an operation if there is already one running for the given volume and node.
|
||||
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
|
||||
if klog.V(10) {
|
||||
klog.Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
|
||||
}
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// Don't even try to start an operation if there is already one running for the given volume
|
||||
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
|
||||
if klog.V(10) {
|
||||
klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Because the attach operation updates the ActualStateOfWorld before
|
||||
// marking itself complete, IsOperationPending() must be checked before
|
||||
// GetAttachState() to guarantee the ActualStateOfWorld is
|
||||
// up-to-date when it's read.
|
||||
// See https://github.com/kubernetes/kubernetes/issues/93902
|
||||
attachState := rc.actualStateOfWorld.GetAttachState(volumeToAttach.VolumeName, volumeToAttach.NodeName)
|
||||
if attachState == cache.AttachStateAttached {
|
||||
// Volume/Node exists, touch it to reset detachRequestedTime
|
||||
if klog.V(5) {
|
||||
klog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
|
||||
|
@ -235,26 +275,7 @@ func (rc *reconciler) attachDesiredVolumes() {
|
|||
continue
|
||||
}
|
||||
|
||||
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
|
||||
|
||||
// Don't even try to start an operation if there is already one running for the given volume and node.
|
||||
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
|
||||
if klog.V(10) {
|
||||
klog.Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
// Don't even try to start an operation if there is already one running for the given volume
|
||||
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
|
||||
if klog.V(10) {
|
||||
klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
|
||||
nodes := rc.actualStateOfWorld.GetNodesForAttachedVolume(volumeToAttach.VolumeName)
|
||||
if len(nodes) > 0 {
|
||||
if !volumeToAttach.MultiAttachErrorReported {
|
||||
|
@ -263,7 +284,6 @@ func (rc *reconciler) attachDesiredVolumes() {
|
|||
}
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Volume/Node doesn't exist, spawn a goroutine to attach it
|
||||
|
|
|
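Side note (not part of the commit): the reconciler comments above describe an ordering requirement — consult IsOperationPending() first, then re-read the attach state, so a detach that just completed does not trigger a second, unnecessary detach (kubernetes/kubernetes#93902). The self-contained sketch below only illustrates that ordering; toyWorld, isOperationPending and getAttachState are hypothetical stand-ins, not the real cache or attacher/detacher APIs.

package main

import (
	"fmt"
	"sync"
)

type state int

const (
	attached state = iota
	uncertain
	detached
)

// toyWorld is a stand-in for the ActualStateOfWorld cache.
type toyWorld struct {
	mu      sync.RWMutex
	volumes map[string]state
	pending map[string]bool
}

func (w *toyWorld) isOperationPending(vol string) bool {
	w.mu.RLock()
	defer w.mu.RUnlock()
	return w.pending[vol]
}

func (w *toyWorld) getAttachState(vol string) state {
	w.mu.RLock()
	defer w.mu.RUnlock()
	if s, ok := w.volumes[vol]; ok {
		return s
	}
	return detached
}

func reconcileDetach(w *toyWorld, vol string) {
	// 1) Skip if an operation is already running for this volume.
	if w.isOperationPending(vol) {
		fmt.Println(vol, ": operation pending, skip")
		return
	}
	// 2) Re-read the state *after* the pending check: a detach that just
	//    finished may already have removed the volume from the cache.
	if w.getAttachState(vol) == detached {
		fmt.Println(vol, ": already detached, skip")
		return
	}
	fmt.Println(vol, ": issue detach")
}

func main() {
	w := &toyWorld{
		volumes: map[string]state{"vol-a": attached},
		pending: map[string]bool{},
	}
	reconcileDetach(w, "vol-a") // issues detach
	reconcileDetach(w, "vol-b") // already detached, skip
}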
@ -167,6 +167,8 @@ func NewManager(numaNodeInfo cputopology.NUMANodeInfo, topologyPolicyName string
}

func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint {
m.mutex.Lock()
defer m.mutex.Unlock()
return m.podTopologyHints[podUID][containerName]
}

@ -256,10 +258,12 @@ func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitR
}

klog.Infof("[topologymanager] Topology Affinity for (pod: %v container: %v): %v", pod.UID, container.Name, result)
m.mutex.Lock()
if m.podTopologyHints[string(pod.UID)] == nil {
m.podTopologyHints[string(pod.UID)] = make(map[string]TopologyHint)
}
m.podTopologyHints[string(pod.UID)][container.Name] = result
m.mutex.Unlock()

err := m.allocateAlignedResources(pod, &container)
if err != nil {
@ -22,6 +22,7 @@ import (
"net/http"
"path"
"path/filepath"
"runtime"
"sync"
"time"

@ -252,24 +253,28 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon
ds.network = network.NewPluginManager(plug)
klog.Infof("Docker cri networking managed by %v", plug.Name())

// NOTE: cgroup driver is only detectable in docker 1.11+
cgroupDriver := defaultCgroupDriver
dockerInfo, err := ds.client.Info()
klog.Infof("Docker Info: %+v", dockerInfo)
if err != nil {
klog.Errorf("Failed to execute Info() call to the Docker client: %v", err)
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else if len(dockerInfo.CgroupDriver) == 0 {
klog.Warningf("No cgroup driver is set in Docker")
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else {
cgroupDriver = dockerInfo.CgroupDriver
// skipping cgroup driver checks for Windows
if runtime.GOOS == "linux" {
// NOTE: cgroup driver is only detectable in docker 1.11+
cgroupDriver := defaultCgroupDriver
dockerInfo, err := ds.client.Info()
klog.Infof("Docker Info: %+v", dockerInfo)
if err != nil {
klog.Errorf("Failed to execute Info() call to the Docker client: %v", err)
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else if len(dockerInfo.CgroupDriver) == 0 {
klog.Warningf("No cgroup driver is set in Docker")
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else {
cgroupDriver = dockerInfo.CgroupDriver
}
if len(kubeCgroupDriver) != 0 && kubeCgroupDriver != cgroupDriver {
return nil, fmt.Errorf("misconfiguration: kubelet cgroup driver: %q is different from docker cgroup driver: %q", kubeCgroupDriver, cgroupDriver)
}
klog.Infof("Setting cgroupDriver to %s", cgroupDriver)
ds.cgroupDriver = cgroupDriver
}
if len(kubeCgroupDriver) != 0 && kubeCgroupDriver != cgroupDriver {
return nil, fmt.Errorf("misconfiguration: kubelet cgroup driver: %q is different from docker cgroup driver: %q", kubeCgroupDriver, cgroupDriver)
}
klog.Infof("Setting cgroupDriver to %s", cgroupDriver)
ds.cgroupDriver = cgroupDriver

ds.versionCache = cache.NewObjectCache(
func() (interface{}, error) {
return ds.getDockerVersion()
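Side note (not part of the commit): the dockershim hunk above wraps cgroup-driver detection in a runtime.GOOS check so it only runs on Linux. A tiny stand-alone illustration of that guard follows; the default value and detectDriver helper are placeholders, not the real dockershim code.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Placeholder default, standing in for defaultCgroupDriver above.
	cgroupDriver := "cgroupfs"

	// Cgroup drivers are a Linux concept, so detection is skipped on
	// Windows (and any other GOOS) and the default is kept as-is.
	if runtime.GOOS == "linux" {
		if detected := detectDriver(); detected != "" {
			cgroupDriver = detected
		}
	}
	fmt.Println("using cgroup driver:", cgroupDriver)
}

// detectDriver is a hypothetical stand-in for querying `docker info`.
func detectDriver() string {
	return "systemd"
}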
@ -48,6 +48,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"

@ -127,6 +128,9 @@ const (
// Max amount of time to wait for the container runtime to come up.
maxWaitForContainerRuntime = 30 * time.Second

// Max amount of time to wait for node list/watch to initially sync
maxWaitForAPIServerSync = 10 * time.Second

// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
nodeStatusUpdateRetry = 5

@ -136,11 +140,6 @@ const (
// MaxContainerBackOff is the max backoff period, exported for the e2e test
MaxContainerBackOff = 300 * time.Second

// Capacity of the channel for storing pods to kill. A small number should
// suffice because a goroutine is dedicated to check the channel and does
// not block on anything else.
podKillingChannelCapacity = 50

// Period for performing global cleanup tasks.
housekeepingPeriod = time.Second * 2

@ -519,14 +518,30 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}
serviceLister := corelisters.NewServiceLister(serviceIndexer)

nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
var nodeHasSynced cache.InformerSynced
var nodeLister corelisters.NodeLister

if kubeDeps.KubeClient != nil {
fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector()
nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.CoreV1().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector)
r := cache.NewReflector(nodeLW, &v1.Node{}, nodeIndexer, 0)
go r.Run(wait.NeverStop)
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.Set{api.ObjectNameField: string(nodeName)}.String()
}))
nodeLister = kubeInformers.Core().V1().Nodes().Lister()
nodeHasSynced = func() bool {
if kubeInformers.Core().V1().Nodes().Informer().HasSynced() {
return true
}
klog.Infof("kubelet nodes not sync")
return false
}
kubeInformers.Start(wait.NeverStop)
klog.Info("Kubelet client is not nil")
} else {
// we dont have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }
klog.Info("Kubelet client is nil")
}
nodeLister := corelisters.NewNodeLister(nodeIndexer)

// TODO: get the real node object of ourself,
// and use the real node name and UID.

@ -578,6 +593,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig),
serviceLister: serviceLister,
nodeLister: nodeLister,
nodeHasSynced: nodeHasSynced,
masterServiceNamespace: masterServiceNamespace,
streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration,
recorder: kubeDeps.Recorder,

@ -843,7 +859,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.podWorkers = newPodWorkers(klet.syncPod, kubeDeps.Recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache)

klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity)
klet.podKiller = NewPodKiller(klet)

etcHostsPathFunc := func(podUID types.UID) string { return getEtcHostsPath(klet.getPodDir(podUID)) }
// setup eviction manager

@ -967,7 +983,9 @@ type Kubelet struct {
serviceLister serviceLister
// nodeLister knows how to list nodes
nodeLister corelisters.NodeLister

// nodeHasSynced indicates whether nodes have been sync'd at least once.
// Check this before trusting a response from the node lister.
nodeHasSynced cache.InformerSynced
// a list of node labels to register
nodeLabels map[string]string

@ -1129,8 +1147,8 @@ type Kubelet struct {
// Container restart Backoff
backOff *flowcontrol.Backoff

// Channel for sending pods to kill.
podKillingCh chan *kubecontainer.PodPair
// Pod killer handles pods to be killed
podKiller PodKiller

// Information about the ports which are opened by daemons on Node running this Kubelet server.
daemonEndpoints *v1.NodeDaemonEndpoints

@ -1439,7 +1457,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {

// Start a goroutine responsible for killing pods (that are not properly
// handled by pod workers).
go wait.Until(kl.podKiller, 1*time.Second, wait.NeverStop)
go wait.Until(kl.podKiller.PerformPodKillingWork, 1*time.Second, wait.NeverStop)

// Start component sync loops.
kl.statusManager.Start()

@ -1507,6 +1525,13 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
return nil
}

// If the pod is a static pod and its mirror pod is still gracefully terminating,
// we do not want to start the new static pod until the old static pod is gracefully terminated.
podFullName := kubecontainer.GetPodFullName(pod)
if kl.podKiller.IsMirrorPodPendingTerminationByPodName(podFullName) {
return fmt.Errorf("pod %q is pending termination", podFullName)
}

// Latency measurements for the main workflow are relative to the
// first time the pod was seen by the API server.
var firstSeenTime time.Time

@ -1640,7 +1665,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {

// Create Mirror Pod for Static Pod if it doesn't already exist
if kubetypes.IsStaticPod(pod) {
podFullName := kubecontainer.GetPodFullName(pod)
deleted := false
if mirrorPod != nil {
if mirrorPod.DeletionTimestamp != nil || !kl.podManager.IsMirrorPodOf(mirrorPod, pod) {

@ -1764,7 +1788,10 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
}
podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}

kl.podKillingCh <- &podPair
if _, ok := kl.podManager.GetMirrorPodByPod(pod); ok {
kl.podKiller.MarkMirrorPodPendingTermination(pod)
}
kl.podKiller.KillPod(&podPair)
// TODO: delete the mirror pod here?

// We leave the volume/directory cleanup to the periodic cleanup routine.
@ -22,6 +22,7 @@ import (
"io/ioutil"
"net"
"path/filepath"
"time"

cadvisorapiv1 "github.com/google/cadvisor/info/v1"
"k8s.io/klog"

@ -30,6 +31,7 @@ import (

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"

@ -232,6 +234,15 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) {
if kl.kubeClient == nil {
return kl.initialNode(context.TODO())
}
// if we have a valid kube client, we wait for initial lister to sync
if !kl.nodeHasSynced() {
err := wait.PollImmediate(time.Second, maxWaitForAPIServerSync, func() (bool, error) {
return kl.nodeHasSynced(), nil
})
if err != nil {
return nil, fmt.Errorf("nodes have not yet been read at least once, cannot construct node object")
}
}
return kl.nodeLister.Get(string(kl.nodeName))
}

@ -242,7 +253,7 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) {
// zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) {
if kl.kubeClient != nil {
if n, err := kl.nodeLister.Get(string(kl.nodeName)); err == nil {
if n, err := kl.GetNode(); err == nil {
return n, nil
}
}
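Side note (not part of the commit): GetNode() now waits up to maxWaitForAPIServerSync for the node informer to sync before trusting the lister. The sketch below reproduces that polling idea without the client-go dependencies; pollUntil and hasSynced are illustrative names, not kubelet APIs.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil calls cond once immediately and then once per interval until it
// returns true or the timeout expires, roughly what wait.PollImmediate does.
func pollUntil(interval, timeout time.Duration, cond func() bool) error {
	deadline := time.Now().Add(timeout)
	for {
		if cond() {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	// hasSynced stands in for the informer's HasSynced; here it flips to
	// true after half a second.
	hasSynced := func() bool { return time.Since(start) > 500*time.Millisecond }

	if err := pollUntil(100*time.Millisecond, 10*time.Second, hasSynced); err != nil {
		fmt.Println("nodes have not yet been read at least once:", err)
		return
	}
	fmt.Println("node lister synced; safe to read from it")
}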
@ -70,6 +70,11 @@ import (
const (
managedHostsHeader = "# Kubernetes-managed hosts file.\n"
managedHostsHeaderWithHostNetwork = "# Kubernetes-managed hosts file (host network).\n"

// Capacity of the channel for storing pods to kill. A small number should
// suffice because a goroutine is dedicated to check the channel and does
// not block on anything else.
podKillingChannelCapacity = 50
)

// Get a list of pods that have data directories.

@ -1010,6 +1015,23 @@ func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Po
kl.statusManager.RemoveOrphanedStatuses(podUIDs)
}

// deleteOrphanedMirrorPods checks whether pod killer has done with orphaned mirror pod.
// If pod killing is done, podManager.DeleteMirrorPod() is called to delete mirror pod
// from the API server
func (kl *Kubelet) deleteOrphanedMirrorPods() {
podFullNames := kl.podManager.GetOrphanedMirrorPodNames()
for _, podFullname := range podFullNames {
if !kl.podKiller.IsMirrorPodPendingTerminationByPodName(podFullname) {
_, err := kl.podManager.DeleteMirrorPod(podFullname, nil)
if err != nil {
klog.Errorf("encountered error when deleting mirror pod %q : %v", podFullname, err)
} else {
klog.V(3).Infof("deleted pod %q", podFullname)
}
}
}
}

// HandlePodCleanups performs a series of cleanup work, including terminating
// pod workers, killing unwanted pods, and removing orphaned volumes/pod
// directories.

@ -1061,7 +1083,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
}
for _, pod := range runningPods {
if _, found := desiredPods[pod.ID]; !found {
kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod}
kl.podKiller.KillPod(&kubecontainer.PodPair{APIPod: nil, RunningPod: pod})
}
}

@ -1089,24 +1111,112 @@ func (kl *Kubelet) HandlePodCleanups() error {
}

// Remove any orphaned mirror pods.
kl.podManager.DeleteOrphanedMirrorPods()
kl.deleteOrphanedMirrorPods()

// Remove any cgroups in the hierarchy for pods that are no longer running.
if kl.cgroupsPerQOS {
kl.cleanupOrphanedPodCgroups(cgroupPods, activePods)
pcm := kl.containerManager.NewPodContainerManager()
kl.cleanupOrphanedPodCgroups(pcm, cgroupPods, activePods)
}

kl.backOff.GC()
return nil
}

// podKiller launches a goroutine to kill a pod received from the channel if
// PodKiller handles requests for killing pods
type PodKiller interface {
// KillPod receives pod speficier representing the pod to kill
KillPod(pair *kubecontainer.PodPair)
// PerformPodKillingWork performs the actual pod killing work via calling CRI
// It returns after its Close() func is called and all outstanding pod killing requests are served
PerformPodKillingWork()
// After Close() is called, this pod killer wouldn't accept any more pod killing requests
Close()
// IsMirrorPodPendingTerminationByPodName checks whether the mirror pod for the given full pod name is pending termination
IsMirrorPodPendingTerminationByPodName(podFullname string) bool
// IsMirrorPodPendingTerminationByUID checks whether the mirror pod for the given uid is pending termination
IsMirrorPodPendingTerminationByUID(uid types.UID) bool
// MarkMirrorPodPendingTermination marks the mirror pod entering grace period of termination
MarkMirrorPodPendingTermination(pod *v1.Pod)
}

// podKillerWithChannel is an implementation of PodKiller which receives pod killing requests via channel
type podKillerWithChannel struct {
// Channel for getting pods to kill.
podKillingCh chan *kubecontainer.PodPair
// lock for synchronization between HandlePodCleanups and pod killer
podKillingLock *sync.Mutex
// mirrorPodTerminationMap keeps track of the progress of mirror pod termination
// The key is the UID of the pod and the value is the full name of the pod
mirrorPodTerminationMap map[string]string
// killPod is the func which invokes runtime to kill the pod
killPod func(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error
}

// NewPodKiller returns a functional PodKiller
func NewPodKiller(kl *Kubelet) PodKiller {
podKiller := &podKillerWithChannel{
podKillingCh: make(chan *kubecontainer.PodPair, podKillingChannelCapacity),
podKillingLock: &sync.Mutex{},
mirrorPodTerminationMap: make(map[string]string),
killPod: kl.killPod,
}
return podKiller
}

// IsMirrorPodPendingTerminationByUID checks whether the pod for the given uid is pending termination
func (pk *podKillerWithChannel) IsMirrorPodPendingTerminationByUID(uid types.UID) bool {
pk.podKillingLock.Lock()
defer pk.podKillingLock.Unlock()
_, ok := pk.mirrorPodTerminationMap[string(uid)]
return ok
}

// IsMirrorPodPendingTerminationByPodName checks whether the given pod is in grace period of termination
func (pk *podKillerWithChannel) IsMirrorPodPendingTerminationByPodName(podFullname string) bool {
pk.podKillingLock.Lock()
defer pk.podKillingLock.Unlock()
for _, name := range pk.mirrorPodTerminationMap {
if name == podFullname {
return true
}
}
return false
}

func (pk *podKillerWithChannel) markMirrorPodTerminated(uid string) {
pk.podKillingLock.Lock()
klog.V(4).Infof("marking pod termination %q", uid)
delete(pk.mirrorPodTerminationMap, uid)
pk.podKillingLock.Unlock()
}

// MarkMirrorPodPendingTermination marks the pod entering grace period of termination
func (pk *podKillerWithChannel) MarkMirrorPodPendingTermination(pod *v1.Pod) {
fullname := kubecontainer.GetPodFullName(pod)
klog.V(3).Infof("marking pod pending termination %q", string(pod.UID))
pk.podKillingLock.Lock()
pk.mirrorPodTerminationMap[string(pod.UID)] = fullname
pk.podKillingLock.Unlock()
}

// Close closes the channel through which requests are delivered
func (pk *podKillerWithChannel) Close() {
close(pk.podKillingCh)
}

// KillPod sends pod killing request to the killer
func (pk *podKillerWithChannel) KillPod(pair *kubecontainer.PodPair) {
pk.podKillingCh <- pair
}

// PerformPodKillingWork launches a goroutine to kill a pod received from the channel if
// another goroutine isn't already in action.
func (kl *Kubelet) podKiller() {
func (pk *podKillerWithChannel) PerformPodKillingWork() {
killing := sets.NewString()
// guard for the killing set
lock := sync.Mutex{}
for podPair := range kl.podKillingCh {
for podPair := range pk.podKillingCh {
runningPod := podPair.RunningPod
apiPod := podPair.APIPod

@ -1120,13 +1230,14 @@ func (kl *Kubelet) podKiller() {
if !exists {
go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) {
klog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
err := kl.killPod(apiPod, runningPod, nil, nil)
err := pk.killPod(apiPod, runningPod, nil, nil)
if err != nil {
klog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
}
lock.Lock()
killing.Delete(string(runningPod.ID))
lock.Unlock()
pk.markMirrorPodTerminated(string(runningPod.ID))
}(apiPod, runningPod)
}
}

@ -1712,13 +1823,12 @@ func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID

// cleanupOrphanedPodCgroups removes cgroups that should no longer exist.
// it reconciles the cached state of cgroupPods with the specified list of runningPods
func (kl *Kubelet) cleanupOrphanedPodCgroups(cgroupPods map[types.UID]cm.CgroupName, activePods []*v1.Pod) {
func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupPods map[types.UID]cm.CgroupName, activePods []*v1.Pod) {
// Add all running pods to the set that we want to preserve
podSet := sets.NewString()
for _, pod := range activePods {
podSet.Insert(string(pod.UID))
}
pcm := kl.containerManager.NewPodContainerManager()

// Iterate over all the found pods to verify if they should be running
for uid, val := range cgroupPods {

@ -1727,6 +1837,11 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(cgroupPods map[types.UID]cm.CgroupN
continue
}

// if the pod is within termination grace period, we shouldn't cleanup the underlying cgroup
if kl.podKiller.IsMirrorPodPendingTerminationByUID(uid) {
klog.V(3).Infof("pod %q is pending termination", uid)
continue
}
// If volumes have not been unmounted/detached, do not delete the cgroup
// so any memory backed volumes don't have their charges propagated to the
// parent croup. If the volumes still exist, reduce the cpu shares for any
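Side note (not part of the commit): the PodKiller added above is a channel-fed worker — KillPod enqueues a request, PerformPodKillingWork drains the channel and uses a mutex-guarded set so each pod is handled by only one goroutine at a time. Below is a stripped-down, dependency-free sketch of that shape; all names are illustrative.

package main

import (
	"fmt"
	"sync"
	"time"
)

type killer struct {
	requests chan string
	mu       sync.Mutex
	inFlight map[string]bool
}

func newKiller() *killer {
	return &killer{requests: make(chan string, 50), inFlight: map[string]bool{}}
}

// Kill enqueues a request, like PodKiller.KillPod.
func (k *killer) Kill(podID string) { k.requests <- podID }

// Run drains the channel, like PerformPodKillingWork, spawning one goroutine
// per pod that is not already being handled.
func (k *killer) Run() {
	for podID := range k.requests {
		k.mu.Lock()
		busy := k.inFlight[podID]
		if !busy {
			k.inFlight[podID] = true
		}
		k.mu.Unlock()
		if busy {
			continue
		}
		go func(id string) {
			fmt.Println("killing", id)
			time.Sleep(10 * time.Millisecond) // stand-in for the runtime call
			k.mu.Lock()
			delete(k.inFlight, id)
			k.mu.Unlock()
		}(podID)
	}
}

func main() {
	k := newKiller()
	go k.Run()
	k.Kill("pod-a")
	k.Kill("pod-a") // duplicate request is coalesced while pod-a is in flight
	k.Kill("pod-b")
	time.Sleep(100 * time.Millisecond)
	close(k.requests)
}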
@ -77,10 +77,8 @@ type Manager interface {
// this means deleting the mappings related to mirror pods. For non-
// mirror pods, this means deleting from indexes for all non-mirror pods.
DeletePod(pod *v1.Pod)
// DeleteOrphanedMirrorPods deletes all mirror pods which do not have
// associated static pods. This method sends deletion requests to the API
// server, but does NOT modify the internal pod storage in basicManager.
DeleteOrphanedMirrorPods()
// GetOrphanedMirrorPodNames returns names of orphaned mirror pods
GetOrphanedMirrorPodNames() []string
// TranslatePodUID returns the actual UID of a pod. If the UID belongs to
// a mirror pod, returns the UID of its static pod. Otherwise, returns the
// original UID.

@ -323,7 +321,7 @@ func (pm *basicManager) GetUIDTranslations() (podToMirror map[kubetypes.Resolved
return podToMirror, mirrorToPod
}

func (pm *basicManager) getOrphanedMirrorPodNames() []string {
func (pm *basicManager) GetOrphanedMirrorPodNames() []string {
pm.lock.RLock()
defer pm.lock.RUnlock()
var podFullNames []string

@ -335,13 +333,6 @@ func (pm *basicManager) getOrphanedMirrorPodNames() []string {
return podFullNames
}

func (pm *basicManager) DeleteOrphanedMirrorPods() {
podFullNames := pm.getOrphanedMirrorPodNames()
for _, podFullName := range podFullNames {
pm.MirrorClient.DeleteMirrorPod(podFullName, nil)
}
}

func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool {
// Check name and namespace first.
if pod.Name != mirrorPod.Name || pod.Namespace != mirrorPod.Namespace {
@ -11,6 +11,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/validation:go_default_library",
"//pkg/controller/volume/scheduling:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
@ -35,6 +35,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
kubefeatures "k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"

@ -375,7 +376,7 @@ func (sched *Scheduler) Run(ctx context.Context) {
func (sched *Scheduler) recordSchedulingFailure(prof *profile.Profile, podInfo *framework.PodInfo, err error, reason string, message string) {
sched.Error(podInfo, err)
pod := podInfo.Pod
prof.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", message)
prof.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", truncateMessage(message))
if err := sched.podConditionUpdater.update(pod, &v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionFalse,

@ -386,6 +387,16 @@ func (sched *Scheduler) recordSchedulingFailure(prof *profile.Profile, podInfo *
}
}

// truncateMessage truncates a message if it hits the NoteLengthLimit.
func truncateMessage(message string) string {
max := validation.NoteLengthLimit
if len(message) <= max {
return message
}
suffix := " ..."
return message[:max-len(suffix)] + suffix
}

// preempt tries to create room for a pod that has failed to schedule, by preempting lower priority pods if possible.
// If it succeeds, it adds the name of the node where preemption has happened to the pod spec.
// It returns the node name and an error if any.
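Side note (not part of the commit): truncateMessage caps the FailedScheduling event message at validation.NoteLengthLimit and appends " ...". The same logic is shown below with a hard-coded limit standing in for NoteLengthLimit (assumed here to be 1024) so it runs on its own.

package main

import "fmt"

// noteLengthLimit is an assumed stand-in for validation.NoteLengthLimit.
const noteLengthLimit = 1024

func truncateMessage(message string) string {
	max := noteLengthLimit
	if len(message) <= max {
		return message
	}
	suffix := " ..."
	return message[:max-len(suffix)] + suffix
}

func main() {
	long := make([]byte, 2000)
	for i := range long {
		long[i] = 'x'
	}
	out := truncateMessage(string(long))
	// The result is exactly the limit long and ends with the " ..." marker.
	fmt.Println(len(out), out[len(out)-4:])
}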
@ -28,6 +28,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
@ -22,6 +22,7 @@ import (
"strings"
"sync"

"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
"k8s.io/utils/exec"
"k8s.io/utils/mount"

@ -451,11 +452,12 @@ type VolumeHost interface {

// VolumePluginMgr tracks registered plugins.
type VolumePluginMgr struct {
mutex sync.Mutex
plugins map[string]VolumePlugin
prober DynamicPluginProber
probedPlugins map[string]VolumePlugin
Host VolumeHost
mutex sync.Mutex
plugins map[string]VolumePlugin
prober DynamicPluginProber
probedPlugins map[string]VolumePlugin
loggedDeprecationWarnings sets.String
Host VolumeHost
}

// Spec is an internal representation of a volume. All API volume types translate to Spec.

@ -586,6 +588,7 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu
defer pm.mutex.Unlock()

pm.Host = host
pm.loggedDeprecationWarnings = sets.NewString()

if prober == nil {
// Use a dummy prober to prevent nil deference.

@ -682,9 +685,7 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
}

// Issue warning if the matched provider is deprecated
if detail, ok := deprecatedVolumeProviders[matches[0].GetPluginName()]; ok {
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", matches[0].GetPluginName(), detail)
}
pm.logDeprecation(matches[0].GetPluginName())
return matches[0], nil
}

@ -717,12 +718,20 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
}

// Issue warning if the matched provider is deprecated
if detail, ok := deprecatedVolumeProviders[matches[0].GetPluginName()]; ok {
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", matches[0].GetPluginName(), detail)
}
pm.logDeprecation(matches[0].GetPluginName())
return matches[0], nil
}

// logDeprecation logs warning when a deprecated plugin is used.
func (pm *VolumePluginMgr) logDeprecation(plugin string) {
if detail, ok := deprecatedVolumeProviders[plugin]; ok && !pm.loggedDeprecationWarnings.Has(plugin) {
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", plugin, detail)
// Make sure the message is logged only once. It has Warning severity
// and we don't want to spam the log too much.
pm.loggedDeprecationWarnings.Insert(plugin)
}
}

// Check if probedPlugin cache update is required.
// If it is, initialize all probed plugins and replace the cache with them.
func (pm *VolumePluginMgr) refreshProbedPlugins() {
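Side note (not part of the commit): logDeprecation above uses a sets.String to print each deprecated-plugin warning only once. The same once-per-key idea is shown below with only the standard library (a map in place of sets.String); the plugin names and detail text are sample data.

package main

import "fmt"

// deprecationLogger warns once per plugin name, mirroring the
// loggedDeprecationWarnings set added to VolumePluginMgr above.
type deprecationLogger struct {
	deprecated map[string]string // plugin -> migration detail (sample data)
	logged     map[string]bool
}

func (l *deprecationLogger) logDeprecation(plugin string) {
	if detail, ok := l.deprecated[plugin]; ok && !l.logged[plugin] {
		fmt.Printf("WARNING: %s built-in volume provider is now deprecated. %s\n", plugin, detail)
		l.logged[plugin] = true // warn only once to avoid spamming the log
	}
}

func main() {
	l := &deprecationLogger{
		deprecated: map[string]string{"example-plugin": "Use the CSI driver instead."},
		logged:     map[string]bool{},
	}
	l.logDeprecation("example-plugin") // prints the warning
	l.logDeprecation("example-plugin") // silent: already logged
	l.logDeprecation("other-plugin")   // silent: not deprecated
}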
@ -181,11 +181,40 @@ func (az *Cloud) CreateOrUpdateSecurityGroup(service *v1.Service, sg network.Sec
return rerr.Error()
}

func cleanupSubnetInFrontendIPConfigurations(lb *network.LoadBalancer) network.LoadBalancer {
if lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil {
return *lb
}

frontendIPConfigurations := *lb.FrontendIPConfigurations
for i := range frontendIPConfigurations {
config := frontendIPConfigurations[i]
if config.FrontendIPConfigurationPropertiesFormat != nil &&
config.Subnet != nil &&
config.Subnet.ID != nil {
subnet := network.Subnet{
ID: config.Subnet.ID,
}
if config.Subnet.Name != nil {
subnet.Name = config.FrontendIPConfigurationPropertiesFormat.Subnet.Name
}
config.FrontendIPConfigurationPropertiesFormat.Subnet = &subnet
frontendIPConfigurations[i] = config
continue
}
}

lb.FrontendIPConfigurations = &frontendIPConfigurations
return *lb
}

// CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {
ctx, cancel := getContextWithCancel()
defer cancel()

lb = cleanupSubnetInFrontendIPConfigurations(&lb)

rgName := az.getLoadBalancerResourceGroup()
rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, *lb.Name, lb, to.String(lb.Etag))
klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
@ -1363,10 +1363,15 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
vmssNamesMap[vmSetName] = true
}

vmssUpdaters := make([]func() error, 0, len(vmssNamesMap))
errors := make([]error, 0, len(vmssNamesMap))
for vmssName := range vmssNamesMap {
vmssName := vmssName
vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault)
if err != nil {
return err
klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get VMSS %s: %v", vmssName, err)
errors = append(errors, err)
continue
}

// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.

@ -1382,11 +1387,15 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
primaryNIC, err := ss.getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
if err != nil {
return err
klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get the primary network interface config of the VMSS %s: %v", vmssName, err)
errors = append(errors, err)
continue
}
primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC)
if err != nil {
return err
klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to the primary IP config from the VMSS %s's network config : %v", vmssName, err)
errors = append(errors, err)
continue
}
loadBalancerBackendAddressPools := []compute.SubResource{}
if primaryIPConfig.LoadBalancerBackendAddressPools != nil {

@ -1407,26 +1416,39 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
continue
}

// Compose a new vmss with added backendPoolID.
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
newVMSS := compute.VirtualMachineScaleSet{
Sku: vmss.Sku,
Location: vmss.Location,
VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
NetworkInterfaceConfigurations: &vmssNIC,
vmssUpdaters = append(vmssUpdaters, func() error {
// Compose a new vmss with added backendPoolID.
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
newVMSS := compute.VirtualMachineScaleSet{
Sku: vmss.Sku,
Location: vmss.Location,
VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
NetworkInterfaceConfigurations: &vmssNIC,
},
},
},
},
}
}

klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID)
rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS)
if rerr != nil {
klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, err)
return rerr.Error()
}
klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID)
rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS)
if rerr != nil {
klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, rerr)
return rerr.Error()
}

return nil
})
}

errs := utilerrors.AggregateGoroutines(vmssUpdaters...)
if errs != nil {
return utilerrors.Flatten(errs)
}
// Fail if there are other errors.
if len(errors) > 0 {
return utilerrors.Flatten(utilerrors.NewAggregate(errors))
}

return nil
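Side note (not part of the commit): the VMSS hunk above stops returning on the first failure — it collects one update closure per scale set, runs them with utilerrors.AggregateGoroutines, and keeps a separate errors slice for lookups that failed earlier in the loop. Below is a dependency-free sketch of that collect-then-aggregate shape; runAll is an illustrative helper, not the apimachinery API.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// runAll runs every updater concurrently and returns all errors, roughly what
// utilerrors.AggregateGoroutines does for the VMSS updaters above.
func runAll(updaters []func() error) []error {
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs []error
	)
	for _, u := range updaters {
		wg.Add(1)
		go func(f func() error) {
			defer wg.Done()
			if err := f(); err != nil {
				mu.Lock()
				errs = append(errs, err)
				mu.Unlock()
			}
		}(u)
	}
	wg.Wait()
	return errs
}

func main() {
	var updaters []func() error
	for _, name := range []string{"vmss-a", "vmss-b", "vmss-c"} {
		name := name // capture per iteration, as the hunk does with vmssName
		updaters = append(updaters, func() error {
			if name == "vmss-b" {
				return errors.New(name + ": update failed")
			}
			fmt.Println(name, "updated")
			return nil
		})
	}
	if errs := runAll(updaters); len(errs) > 0 {
		fmt.Println("aggregated errors:", errs)
	}
}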
@ -1145,7 +1145,7 @@ gopkg.in/square/go-jose.v2/jwt
gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v2
# k8s.io/api v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.15-k3s1
# k8s.io/api v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.18.16-k3s1
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1

@ -1189,7 +1189,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.15-k3s1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.16-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install

@ -1229,7 +1229,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.15-k3s1
# k8s.io/apimachinery v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.18.16-k3s1
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta

@ -1291,7 +1291,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.15-k3s1
# k8s.io/apiserver v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.18.16-k3s1
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
k8s.io/apiserver/pkg/admission/initializer

@ -1421,7 +1421,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.15-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.18.16-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps

@ -1434,7 +1434,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.15-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.18.16-k3s1
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached
k8s.io/client-go/discovery/cached/disk

@ -1668,7 +1668,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.15-k3s1
# k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.18.16-k3s1
k8s.io/cloud-provider
k8s.io/cloud-provider/api
k8s.io/cloud-provider/node/helpers

@ -1676,13 +1676,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.15-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.16-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.15-k3s1
# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.18.16-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake

@ -1697,7 +1697,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.15-k3s1
# k8s.io/component-base v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.18.16-k3s1
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag
k8s.io/component-base/codec

@ -1715,10 +1715,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
k8s.io/component-base/metrics/testutil
k8s.io/component-base/version
k8s.io/component-base/version/verflag
# k8s.io/cri-api v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.15-k3s1
# k8s.io/cri-api v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.18.16-k3s1
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.15-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.16-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20200114144118-36b2048a9120

@ -1733,7 +1733,7 @@ k8s.io/gengo/types
k8s.io/heapster/metrics/api/v1/types
# k8s.io/klog v1.0.0
k8s.io/klog
# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.15-k3s1
# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.16-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1

@ -1761,7 +1761,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.15-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.16-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
k8s.io/kube-openapi/pkg/aggregator

@ -1772,14 +1772,14 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.15-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.18.16-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.15-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.16-k3s1
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1alpha1
k8s.io/kube-scheduler/config/v1alpha2
k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.15-k3s1
# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.18.16-k3s1
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate

@ -1854,11 +1854,11 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.15-k3s1
# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.18.16-k3s1
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
k8s.io/kubelet/pkg/apis/pluginregistration/v1
# k8s.io/kubernetes v1.18.15 => github.com/k3s-io/kubernetes v1.18.15-k3s1
# k8s.io/kubernetes v1.18.16 => github.com/k3s-io/kubernetes v1.18.16-k3s1
k8s.io/kubernetes/cmd/cloud-controller-manager/app
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme

@ -2601,7 +2601,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
k8s.io/kubernetes/third_party/forked/ipvs
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.15-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.16-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth

@ -2632,7 +2632,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.15-k3s1
# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.18.16-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2