diff --git a/go.mod b/go.mod
index 1659fd928a..368ca04c1d 100644
--- a/go.mod
+++ b/go.mod
@@ -33,31 +33,31 @@ replace (
 	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1
-	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1
-	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1
-	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1
-	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1
-	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1
-	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1
-	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1
-	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1
-	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1
-	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1
-	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1
-	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1
-	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1
-	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1
-	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1
-	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1
-	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1
-	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.10-k3s1
-	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1
-	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1
-	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.10-k3s1
-	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.10-k3s1
-	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.10-k3s1
-	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.10-k3s1
+	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.12-k3s1
+	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.12-k3s1
+	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.12-k3s1
+	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.12-k3s1
+	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.12-k3s1
+	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.12-k3s1
+	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.12-k3s1
+	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.12-k3s1
+	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.12-k3s1
+	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.12-k3s1
+	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.12-k3s1
+	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.12-k3s1
+	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.12-k3s1
+	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.12-k3s1
+	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.12-k3s1
+	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.12-k3s1
+	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.12-k3s1
+	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.12-k3s1
+	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.12-k3s1
+	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.12-k3s1
+	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.12-k3s1
+	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.12-k3s1
+	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.12-k3s1
+	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.12-k3s1
+	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.12-k3s1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 )
@@ -123,5 +123,5 @@ require (
 	k8s.io/component-base v0.0.0
 	k8s.io/cri-api v0.0.0
 	k8s.io/klog v1.0.0
-	k8s.io/kubernetes v1.18.10
+	k8s.io/kubernetes v1.18.12
 )
diff --git a/go.sum b/go.sum
index d4626a1c91..1cfbfc3af4 100644
--- a/go.sum
+++ b/go.sum
@@ -638,49 +638,49 @@ github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd h1:KPnQ
 github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd/go.mod h1:QYmg8cqWPPfIbpEuhtJbEdWwA6PEKSY016Z6EdfL9+8=
 github.com/rancher/kine v0.5.0 h1:ot9ZInMCb0482aWfvO+3gI2B+e9vGxpY12EDJgpowiY=
 github.com/rancher/kine v0.5.0/go.mod h1:NoqDMfN0Q+Wu23Kk3MfXfgLO2fE6abLaetejZs9HAYo=
-github.com/rancher/kubernetes v1.18.10-k3s1 h1:RS7DmQf0U/iNhGO0MIvfmzycCowMSvUQhwZNVdydWm8=
-github.com/rancher/kubernetes v1.18.10-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1 h1:tZfjkh+JjJ3omi19P8caLdN9ql5Ftpk0tWAmJh4Bo5E=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1 h1:Isl/Gi7wCyZ31Hpkw+o3SAYx+SP9VcL5Udbwndq5MfE=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1 h1:zXR4Em0Og8HoZU14b6gMb74n9JUMYd2pzzN8tFvHniw=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1 h1:v6Yeydb3yeJB6+MQVGGjIOQcYWkHpfr1WdNy7N+9kdg=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1 h1:OU2/souymetniKHDb8S6RmrXVsBV/WJuY9spVQBb+Dc=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1 h1:4/FsynISjNWhC+c0JVFawCnKtM2Q4jzvP6xaVIG1u6o=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1 h1:+GhEM4DXe0BMWEcss49rNkFLHp032Ybt/FfIjtjiAqY=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1 h1:/GzhswAi+6qxukCTwLlPKLPup2xcZ1ZxM0RI525do/o=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1 h1:OjdqC+CK/kcK0Lq3LX2MCBSxzXc0yDNS9UqwDoRLgxU=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1 h1:5XsuCxrfl1s08MRm+4i98l4fsCW9KbAFdGXV+x3Anqw=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1 h1:dFGSPzS/pc3yjJ50dafrybu9tzjuWXX/qxngAWgQT5g=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1 h1:siigEKE3XiGkwmD0fVWE26l+xiFrCZv+xfvTttheHyE=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1 h1:uPfAAj4yin/8X3j63jxyLqR8qpM7Zo6yD3zfy4leifI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1 h1:DIA6KBLS1vRYYT9S3XZXPn8Q82g+AvYE9LJcb9rwRfM=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1 h1:UBMPWPH3k09FyAKPMBrS5FG9j6e7CAZrouSOFgQLK2A=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1 h1:OdBE36l4S/XNpxI6OgFkVPZGuAjB1ePecuCIpSDkA0o=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1 h1:aTvmtU9ML9ME/hk1xph6MSpa7hwR4w2I1wkWcYWPqj4=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1 h1:+uo+XJdwIT74zIHV7Rz7r4BtnZU0XO1u7C0N5xIEJrs=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1 h1:S0KK6mxYic3gfzLR/EE7+BByZ64dOVo7j0PizDehcIY=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1 h1:LtO8DDrNAzK113XtUrZEvFTfZ4WxaGULIVtOkxVuAJQ=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
-github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.10-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
+github.com/rancher/kubernetes v1.18.12-k3s1 h1:+hXEKCwvMFGPNNyvFIOPN52bJc8xcer6VZ7fz5Nu/OI=
+github.com/rancher/kubernetes v1.18.12-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.12-k3s1 h1:NaSzl7vG93KrUElE1CHI9lxIO9uDAwEHo8VsV0WJ598=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.12-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.12-k3s1 h1:261dtMIau64TfXX8FfFUr1WDOVF5uxzQreRleYNMCZM=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.12-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.12-k3s1 h1:bTWwCfpUL53+NelTSKZAR54pa/a/OuUbnx/86filFGE=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.12-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.12-k3s1 h1:rOkUH/dHox1xwoced6boi6MVk18JdXeFb4YF94wjMOk=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.12-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.12-k3s1 h1:ObqU/bJNW6NySSt1kv1BjkRm0aIl6umospoDUAoBZ3k=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.12-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.12-k3s1 h1:tCj6ux06rqFidCjl8NupD2RKUfz7uZypaekyuFXXDSM=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.12-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.12-k3s1 h1:Co3XSVcxjaCIN/45D7VpTWc0s+IM6SsyzFERQZVzA7Q=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.12-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.12-k3s1 h1:HTIKccHPSLlxGV4yByEPnkQd5A67SgmtXnr8EJXjz0k=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.12-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.12-k3s1 h1:c0zH8CS14wzAtM3XzKLBvSM/wXTQT8Y2wQle2IWrmtI=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.12-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.12-k3s1 h1:BKWjjDMya/0zJJ6ls21uD5qUvQZxhIN47q/jRskw624=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.12-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.12-k3s1 h1:RzfQEpY8x31OPPOIb98zyExJur/BVVHpYTqMKRQ8Bq4=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.12-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.12-k3s1 h1:DJV1i7LO/khqvWwavsG8qnVKZorBT9iZh1q0klsOp4g=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.12-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.12-k3s1 h1:ODU3EqkMHbbXE+bwioJEHopt48dQwWkG0sZjxregvGg=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.12-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.12-k3s1 h1:Bdx5Ap+HMi+7O4CvocUwCAU7fKKI8+v7dc4JWhGYga4=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.12-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.12-k3s1 h1:Q5XkzB1+5rg33XjdIvKMXBuGozL3ABtvbYx/5HaTV3c=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.12-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.12-k3s1 h1:SbQPyh8XzwK4A01eW6FQ+56X/HOvStrWUjlReiTjfnI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.12-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.12-k3s1 h1:Aq+xR7++BnhFw113x9bJLny4uVsrJ6XMYPiTDamNskA=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.12-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.12-k3s1 h1:6PYKx4M1KAATwdcUEBGW6l0nFiVcCzBPToLZTPec3Vo=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.12-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.12-k3s1 h1:FeodK0hfR3zulgYpAlE3Dd6WaoKElhRWmVU52gszuuo=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.12-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.12-k3s1 h1:cp08nQTd18HvKEmNXC5+u0kaaZxsanjoy/JhVGwHjoo=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.12-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
+github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.12-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
 github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
 github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
 github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=
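The httpstream hunk below is the first non-vendoring change in the patch: Handshake previously treated each X-Stream-Protocol-Version header value as a single protocol name, so a client that sent one header line containing a comma-separated list never matched any server protocol. The new helper splits and trims those values first. A standalone sketch of the helper's behavior, using only the standard library (illustrative, not part of the patch):

package main

import (
	"fmt"
	"strings"
)

// commaSeparatedHeaderValues mirrors the helper added in the hunk below:
// each header value may itself be a comma-separated list of protocols.
func commaSeparatedHeaderValues(header []string) []string {
	var parsed []string
	for i := range header {
		for _, clientProtocol := range strings.Split(header[i], ",") {
			if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 {
				parsed = append(parsed, proto)
			}
		}
	}
	return parsed
}

func main() {
	// One header line carrying a list now negotiates the same way as
	// repeated header lines would.
	fmt.Println(commaSeparatedHeaderValues([]string{"v4.channel.k8s.io, v3.channel.k8s.io"}))
	// prints [v4.channel.k8s.io v3.channel.k8s.io]
}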
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
index 9d5fdeeced..00ce5f785c 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
@@ -114,6 +114,18 @@ func negotiateProtocol(clientProtocols, serverProtocols []string) string {
 	return ""
 }
 
+func commaSeparatedHeaderValues(header []string) []string {
+	var parsedClientProtocols []string
+	for i := range header {
+		for _, clientProtocol := range strings.Split(header[i], ",") {
+			if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 {
+				parsedClientProtocols = append(parsedClientProtocols, proto)
+			}
+		}
+	}
+	return parsedClientProtocols
+}
+
 // Handshake performs a subprotocol negotiation. If the client did request a
 // subprotocol, Handshake will select the first common value found in
 // serverProtocols. If a match is found, Handshake adds a response header
@@ -121,7 +133,7 @@ func negotiateProtocol(clientProtocols, serverProtocols []string) string {
 // returned, along with a response header containing the list of protocols the
 // server can accept.
 func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
-	clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]
+	clientProtocols := commaSeparatedHeaderValues(req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)])
 	if len(clientProtocols) == 0 {
 		return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
 	}
diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go
index aff4e60e91..7ce222b72c 100644
--- a/vendor/k8s.io/client-go/pkg/version/base.go
+++ b/vendor/k8s.io/client-go/pkg/version/base.go
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion = "v1.18.10-k3s1"
-	gitCommit = "c8d808cfc3c2c00c8d542b84e368ca439987e352"
+	gitVersion = "v1.18.12-k3s1"
+	gitCommit = "60c04eedb8755effb61deafd2cc43f4cf44aeee4"
 	gitTreeState = "clean"
-	buildDate = "2020-10-15T18:25:57Z"
+	buildDate = "2020-11-12T20:51:09Z"
 )
diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
index 61989a2cfa..d2da62997c 100644
--- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
+++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
@@ -289,8 +289,12 @@ func (le *LeaderElector) release() bool {
 	if !le.IsLeader() {
 		return true
 	}
+	now := metav1.Now()
 	leaderElectionRecord := rl.LeaderElectionRecord{
-		LeaderTransitions: le.observedRecord.LeaderTransitions,
+		LeaderTransitions:    le.observedRecord.LeaderTransitions,
+		LeaseDurationSeconds: 1,
+		RenewTime:            now,
+		AcquireTime:          now,
 	}
 	if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil {
 		klog.Errorf("Failed to release lock: %v", err)
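The next three hunks make the same fix in all three resource-lock flavors (ConfigMap, Endpoints, Lease): Update previously assigned the client's return value to the cached object before checking the error, so a failed update could clobber the cache with a nil object. A minimal runnable sketch of the pattern, with hypothetical types standing in for the client-go API:

package main

import (
	"errors"
	"fmt"
)

type lease struct{ holder string }

// update simulates an API call that can fail and return a nil object.
func update(l *lease, fail bool) (*lease, error) {
	if fail {
		return nil, errors.New("conflict")
	}
	return &lease{holder: l.holder}, nil
}

type lock struct{ cached *lease }

// Update assigns the result only on success, as the hunks below now do;
// the old code did `l.cached, err = update(...)` and returned err.
func (l *lock) Update(fail bool) error {
	obj, err := update(l.cached, fail)
	if err != nil {
		return err // cache left intact on failure
	}
	l.cached = obj
	return nil
}

func main() {
	l := &lock{cached: &lease{holder: "node-1"}}
	_ = l.Update(true)
	fmt.Println(l.cached != nil) // true: a failed update no longer nils the cache
}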
diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go
index 6390b4ef5f..8c28026bad 100644
--- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go
+++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go
@@ -92,8 +92,12 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord)
 		cml.cm.Annotations = make(map[string]string)
 	}
 	cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-	cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
-	return err
+	cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	cml.cm = cm
+	return nil
 }
 
 // RecordEvent in leader election while adding meta-data
diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go
index 132c5a5489..d11e43e338 100644
--- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go
+++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go
@@ -87,8 +87,12 @@ func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) e
 		el.e.Annotations = make(map[string]string)
 	}
 	el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-	el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
-	return err
+	e, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	el.e = e
+	return nil
 }
 
 // RecordEvent in leader election while adding meta-data
diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
index 3d76d174ea..a403497279 100644
--- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
+++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
@@ -71,9 +71,14 @@ func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error
 		return errors.New("lease not initialized, call get or create first")
 	}
 	ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
-	var err error
-	ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
-	return err
+
+	lease, err := ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+
+	ll.lease = lease
+	return nil
 }
 
 // RecordEvent in leader election while adding meta-data
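The transport cache change below drops the getCert and dial fields from tlsCacheKey: they were %p-formatted function pointers, and for Go funcs %p reflects the underlying code pointer, which closures created from the same literal share even when they capture different state. tlsConfigKey therefore now reports whether a config is cacheable at all, and configs carrying a GetCert or Dial callback bypass the cache entirely. A toy demonstration of the collision (not from the patch):

package main

import "fmt"

func makeDial(id int) func() int {
	return func() int { return id }
}

func main() {
	a, b := makeDial(1), makeDial(2) // behaviorally different callbacks
	// Both closures come from the same literal, so their "%p" keys collide
	// even though calling them yields different results — a cache keyed this
	// way can hand back the wrong transport.
	fmt.Println(fmt.Sprintf("%p", a) == fmt.Sprintf("%p", b), a(), b())
}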
diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go
index 36d6500f58..31bb103788 100644
--- a/vendor/k8s.io/client-go/transport/cache.go
+++ b/vendor/k8s.io/client-go/transport/cache.go
@@ -47,10 +47,8 @@ type tlsCacheKey struct {
 	keyData string
 	certFile string
 	keyFile string
-	getCert string
 	serverName string
 	nextProtos string
-	dial string
 	disableCompression bool
 }
@@ -59,22 +57,24 @@ func (t tlsCacheKey) String() string {
 	if len(t.keyData) > 0 {
 		keyText = ""
 	}
-	return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial, t.disableCompression)
+	return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.serverName, t.disableCompression)
 }
 
 func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
-	key, err := tlsConfigKey(config)
+	key, canCache, err := tlsConfigKey(config)
 	if err != nil {
 		return nil, err
 	}
-	// Ensure we only create a single transport for the given TLS options
-	c.mu.Lock()
-	defer c.mu.Unlock()
+	if canCache {
+		// Ensure we only create a single transport for the given TLS options
+		c.mu.Lock()
+		defer c.mu.Unlock()
 
-	// See if we already have a custom transport for this config
-	if t, ok := c.transports[key]; ok {
-		return t, nil
+		// See if we already have a custom transport for this config
+		if t, ok := c.transports[key]; ok {
+			return t, nil
+		}
 	}
 
 	// Get the TLS options for this client config
@@ -104,8 +104,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
 		go dynamicCertDialer.Run(wait.NeverStop)
 	}
 
-	// Cache a single transport for these options
-	c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
+	transport := utilnet.SetTransportDefaults(&http.Transport{
 		Proxy: http.ProxyFromEnvironment,
 		TLSHandshakeTimeout: 10 * time.Second,
 		TLSClientConfig: tlsConfig,
@@ -113,22 +112,32 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
 		DialContext: dial,
 		DisableCompression: config.DisableCompression,
 	})
-	return c.transports[key], nil
+
+	if canCache {
+		// Cache a single transport for these options
+		c.transports[key] = transport
+	}
+
+	return transport, nil
 }
 
 // tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor
-func tlsConfigKey(c *Config) (tlsCacheKey, error) {
+func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) {
 	// Make sure ca/key/cert content is loaded
 	if err := loadTLSFiles(c); err != nil {
-		return tlsCacheKey{}, err
+		return tlsCacheKey{}, false, err
 	}
+
+	if c.TLS.GetCert != nil || c.Dial != nil {
+		// cannot determine equality for functions
+		return tlsCacheKey{}, false, nil
+	}
+
 	k := tlsCacheKey{
 		insecure: c.TLS.Insecure,
 		caData: string(c.TLS.CAData),
-		getCert: fmt.Sprintf("%p", c.TLS.GetCert),
 		serverName: c.TLS.ServerName,
 		nextProtos: strings.Join(c.TLS.NextProtos, ","),
-		dial: fmt.Sprintf("%p", c.Dial),
 		disableCompression: c.DisableCompression,
 	}
@@ -140,5 +149,5 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) {
 		k.keyData = string(c.TLS.KeyData)
 	}
 
-	return k, nil
+	return k, true, nil
 }
diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go
index aff4e60e91..7ce222b72c 100644
--- a/vendor/k8s.io/component-base/version/base.go
+++ b/vendor/k8s.io/component-base/version/base.go
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion = "v1.18.10-k3s1"
-	gitCommit = "c8d808cfc3c2c00c8d542b84e368ca439987e352"
+	gitVersion = "v1.18.12-k3s1"
+	gitCommit = "60c04eedb8755effb61deafd2cc43f4cf44aeee4"
 	gitTreeState = "clean"
-	buildDate = "2020-10-15T18:25:57Z"
+	buildDate = "2020-11-12T20:51:09Z"
 )
diff --git a/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go b/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go
index 76a3b86dbb..e0b0dc8738 100644
--- a/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go
+++ b/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go
@@ -41,6 +41,8 @@ const (
 	secretNameTemplate = "azure-storage-account-%s-secret"
 	defaultSecretNamespace = "default"
+
+	resourceGroupAnnotation = "kubernetes.io/azure-file-resource-group"
 )
 
 var _ InTreePlugin = &azureFileCSITranslator{}
@@ -116,7 +118,13 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
 		klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err)
 		accountName = azureSource.SecretName
 	}
-	volumeID := fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, "")
+	resourceGroup := ""
+	if pv.ObjectMeta.Annotations != nil {
+		if v, ok := pv.ObjectMeta.Annotations[resourceGroupAnnotation]; ok {
+			resourceGroup = v
+		}
+	}
+	volumeID := fmt.Sprintf(volumeIDTemplate, resourceGroup, accountName, azureSource.ShareName, "")
 
 	var (
 		// refer to https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
@@ -155,6 +163,7 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
 		ReadOnly: csiSource.ReadOnly,
 	}
 
+	resourceGroup := ""
 	if csiSource.NodeStageSecretRef != nil && csiSource.NodeStageSecretRef.Name != "" {
 		azureSource.SecretName = csiSource.NodeStageSecretRef.Name
 		azureSource.SecretNamespace = &csiSource.NodeStageSecretRef.Namespace
@@ -164,16 +173,23 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
 			}
 		}
 	} else {
-		_, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
+		rg, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
 		if err != nil {
 			return nil, err
 		}
 		azureSource.ShareName = fileShareName
 		azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
+		resourceGroup = rg
 	}
 
 	pv.Spec.CSI = nil
 	pv.Spec.AzureFile = azureSource
+	if resourceGroup != "" {
+		if pv.ObjectMeta.Annotations == nil {
+			pv.ObjectMeta.Annotations = map[string]string{}
+		}
+		pv.ObjectMeta.Annotations[resourceGroupAnnotation] = resourceGroup
+	}
 	return pv, nil
 }
diff --git a/vendor/k8s.io/kubectl/pkg/drain/drain.go b/vendor/k8s.io/kubectl/pkg/drain/drain.go
index 9c203668f7..f205387cad 100644
--- a/vendor/k8s.io/kubectl/pkg/drain/drain.go
+++ b/vendor/k8s.io/kubectl/pkg/drain/drain.go
@@ -318,7 +318,6 @@ func (d *Helper) evictPods(pods []corev1.Pod, policyGroupVersion string, getPodF
 			if err != nil {
 				errors = append(errors, err)
 			}
-		default:
 		}
 	}
diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go
index 78bce527cc..f759a8d638 100644
--- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go
+++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go
@@ -358,7 +358,6 @@ func startVolumeExpandController(ctx ControllerContext) (http.Handler, bool, err
 		ctx.ClientBuilder.ClientOrDie("expand-controller"),
 		ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
 		ctx.InformerFactory.Core().V1().PersistentVolumes(),
-		ctx.InformerFactory.Storage().V1().StorageClasses(),
 		ctx.Cloud,
 		plugins,
 		csiTranslator,
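The two autoscaler hunks below fix how the per-policy scan is seeded. The loop folds each policy's proposal into result via selectPolicyFn, so result must start at the identity element of the chosen operator: math.MaxInt32 when the operator is min, math.MinInt32 when it is max. Scale-down previously hard-coded math.MaxInt32 even when MinPolicySelect mapped the operator to max. A small self-contained sketch of the seeding rule (illustrative names only):

package main

import (
	"fmt"
	"math"
)

func min(a, b int32) int32 {
	if a < b {
		return a
	}
	return b
}

func max(a, b int32) int32 {
	if a > b {
		return a
	}
	return b
}

// fold mirrors the policy loop: every proposal is combined with the running
// result, so the seed must be the identity element of the operator.
func fold(proposals []int32, pick func(int32, int32) int32, seed int32) int32 {
	result := seed
	for _, p := range proposals {
		result = pick(result, p)
	}
	return result
}

func main() {
	proposals := []int32{4, 7, 5}
	fmt.Println(fold(proposals, min, math.MaxInt32)) // 4
	fmt.Println(fold(proposals, max, math.MinInt32)) // 7
}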
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
index 0c718193d0..84c6713867 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
@@ -998,15 +998,17 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
 		return currentReplicas // Scaling is disabled
 	} else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
+		result = math.MaxInt32
 		selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value
 	} else {
+		result = math.MinInt32
 		selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
 		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
 		periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod
 		if policy.Type == autoscalingv2.PodsScalingPolicy {
-			proposed = int32(periodStartReplicas + policy.Value)
+			proposed = periodStartReplicas + policy.Value
 		} else if policy.Type == autoscalingv2.PercentScalingPolicy {
 			// the proposal has to be rounded up because the proposed change might not increase the replica count causing the target to never scale up
 			proposed = int32(math.Ceil(float64(periodStartReplicas) * (1 + float64(policy.Value)/100)))
@@ -1018,14 +1020,16 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 
 // calculateScaleDownLimitWithBehavior returns the maximum number of pods that could be deleted for the given HPAScalingRules
 func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
-	var result int32 = math.MaxInt32
+	var result int32
 	var proposed int32
 	var selectPolicyFn func(int32, int32) int32
 	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
 		return currentReplicas // Scaling is disabled
 	} else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
+		result = math.MinInt32
 		selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value
 	} else {
+		result = math.MaxInt32
 		selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
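The replica-calculator hunks below split what used to be a single "ignored" bucket into two: deleted or failed pods stay ignored (their metrics are discarded outright), while pending or not-yet-ready pods become "unready" and are later counted as 0% usage on scale-up. A simplified classification sketch; the real readiness handling also involves the CPU initialization window, which is omitted here:

package main

import "fmt"

type pod struct {
	name            string
	deletedOrFailed bool
	pending         bool
	hasMetric       bool
}

// classify mirrors the four-way split groupPods now returns.
func classify(pods []pod) (ready, unready, missing, ignored []string) {
	for _, p := range pods {
		switch {
		case p.deletedOrFailed:
			ignored = append(ignored, p.name) // dropped from the calculation
		case p.pending:
			unready = append(unready, p.name) // treated as 0% usage on scale-up
		case !p.hasMetric:
			missing = append(missing, p.name) // no sample yet
		default:
			ready = append(ready, p.name)
		}
	}
	return
}

func main() {
	fmt.Println(classify([]pod{
		{name: "a", hasMetric: true},
		{name: "b", deletedOrFailed: true, hasMetric: true},
		{name: "c", pending: true},
		{name: "d"},
	}))
	// [a] [c] [d] [b]
}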
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go
index 01bc0b1818..9c304e068e 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go
@@ -74,8 +74,9 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
 		return 0, 0, 0, time.Time{}, fmt.Errorf("no pods returned by selector while calculating replica count")
 	}
 
-	readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
+	readyPodCount, unreadyPods, missingPods, ignoredPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
 	removeMetricsForPods(metrics, ignoredPods)
+	removeMetricsForPods(metrics, unreadyPods)
 	requests, err := calculatePodRequests(podList, resource)
 	if err != nil {
 		return 0, 0, 0, time.Time{}, err
@@ -90,7 +91,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
 		return 0, 0, 0, time.Time{}, err
 	}
 
-	rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0
+	rebalanceIgnored := len(unreadyPods) > 0 && usageRatio > 1.0
 	if !rebalanceIgnored && len(missingPods) == 0 {
 		if math.Abs(1.0-usageRatio) <= c.tolerance {
 			// return the current replicas if the change would be too small
@@ -117,7 +118,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
 
 	if rebalanceIgnored {
 		// on a scale-up, treat unready pods as using 0% of the resource request
-		for podName := range ignoredPods {
+		for podName := range unreadyPods {
 			metrics[podName] = metricsclient.PodMetric{Value: 0}
 		}
 	}
@@ -182,8 +183,9 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
 		return 0, 0, fmt.Errorf("no pods returned by selector while calculating replica count")
 	}
 
-	readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
+	readyPodCount, unreadyPods, missingPods, ignoredPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
 	removeMetricsForPods(metrics, ignoredPods)
+	removeMetricsForPods(metrics, unreadyPods)
 
 	if len(metrics) == 0 {
 		return 0, 0, fmt.Errorf("did not receive metrics for any ready pods")
@@ -191,7 +193,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
 
 	usageRatio, utilization := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization)
 
-	rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0
+	rebalanceIgnored := len(unreadyPods) > 0 && usageRatio > 1.0
 
 	if !rebalanceIgnored && len(missingPods) == 0 {
 		if math.Abs(1.0-usageRatio) <= c.tolerance {
@@ -219,7 +221,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
 
 	if rebalanceIgnored {
 		// on a scale-up, treat unready pods as using 0% of the resource request
-		for podName := range ignoredPods {
+		for podName := range unreadyPods {
 			metrics[podName] = metricsclient.PodMetric{Value: 0}
 		}
 	}
@@ -364,16 +366,18 @@ func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(statusReplicas int32
 	return replicaCount, utilization, timestamp, nil
 }
 
-func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, ignoredPods sets.String, missingPods sets.String) {
+func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, unreadyPods, missingPods, ignoredPods sets.String) {
 	missingPods = sets.NewString()
+	unreadyPods = sets.NewString()
 	ignoredPods = sets.NewString()
 	for _, pod := range pods {
 		if pod.DeletionTimestamp != nil || pod.Status.Phase == v1.PodFailed {
+			ignoredPods.Insert(pod.Name)
 			continue
 		}
 		// Pending pods are ignored.
 		if pod.Status.Phase == v1.PodPending {
-			ignoredPods.Insert(pod.Name)
+			unreadyPods.Insert(pod.Name)
 			continue
 		}
 		// Pods missing metrics.
@@ -384,22 +388,22 @@ func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1
 		}
 		// Unready pods are ignored.
 		if resource == v1.ResourceCPU {
-			var ignorePod bool
+			var unready bool
 			_, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
 			if condition == nil || pod.Status.StartTime == nil {
-				ignorePod = true
+				unready = true
 			} else {
 				// Pod still within possible initialisation period.
 				if pod.Status.StartTime.Add(cpuInitializationPeriod).After(time.Now()) {
 					// Ignore sample if pod is unready or one window of metric wasn't collected since last state transition.
-					ignorePod = condition.Status == v1.ConditionFalse || metric.Timestamp.Before(condition.LastTransitionTime.Time.Add(metric.Window))
+					unready = condition.Status == v1.ConditionFalse || metric.Timestamp.Before(condition.LastTransitionTime.Time.Add(metric.Window))
 				} else {
 					// Ignore metric if pod is unready and it has never been ready.
-					ignorePod = condition.Status == v1.ConditionFalse && pod.Status.StartTime.Add(delayOfInitialReadinessStatus).After(condition.LastTransitionTime.Time)
+					unready = condition.Status == v1.ConditionFalse && pod.Status.StartTime.Add(delayOfInitialReadinessStatus).After(condition.LastTransitionTime.Time)
 				}
 			}
-			if ignorePod {
-				ignoredPods.Insert(pod.Name)
+			if unready {
+				unreadyPods.Insert(pod.Name)
 				continue
 			}
 		}
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD
index 90e0574908..0f6b622a15 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD
+++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD
@@ -7,7 +7,6 @@ go_library(
     srcs = ["expand_controller.go"],
     importpath = "k8s.io/kubernetes/pkg/controller/volume/expand",
     deps = [
-        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/controller/volume/events:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/csimigration:go_default_library",
@@ -22,12 +21,10 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
-        "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
@@ -68,7 +65,6 @@ go_test(
        "//pkg/volume/util/operationexecutor:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
-        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go
index 73ce28e4cb..f1348ab2eb 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go
@@ -32,18 +32,15 @@ import (
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	coreinformers "k8s.io/client-go/informers/core/v1"
-	storageclassinformer "k8s.io/client-go/informers/storage/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
-	storagelisters "k8s.io/client-go/listers/storage/v1"
 	"k8s.io/client-go/tools/cache"
 	kcache "k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	cloudprovider "k8s.io/cloud-provider"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/controller/volume/events"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/csimigration"
@@ -82,10 +79,6 @@ type expandController struct {
 	pvLister corelisters.PersistentVolumeLister
 	pvSynced kcache.InformerSynced
 
-	// storageClass lister for fetching provisioner name
-	classLister storagelisters.StorageClassLister
-	classListerSynced cache.InformerSynced
-
 	// cloud provider used by volume host
 	cloud cloudprovider.Interface
 
@@ -109,7 +102,6 @@ func NewExpandController(
 	kubeClient clientset.Interface,
 	pvcInformer coreinformers.PersistentVolumeClaimInformer,
 	pvInformer coreinformers.PersistentVolumeInformer,
-	scInformer storageclassinformer.StorageClassInformer,
 	cloud cloudprovider.Interface,
 	plugins []volume.VolumePlugin,
 	translator CSINameTranslator,
@@ -122,8 +114,6 @@ func NewExpandController(
 		pvcsSynced: pvcInformer.Informer().HasSynced,
 		pvLister: pvInformer.Lister(),
 		pvSynced: pvInformer.Informer().HasSynced,
-		classLister: scInformer.Lister(),
-		classListerSynced: scInformer.Informer().HasSynced,
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "volume_expand"),
 		translator: translator,
 		csiMigratedPluginManager: csiMigratedPluginManager,
@@ -236,19 +226,6 @@ func (expc *expandController) syncHandler(key string) error {
 		return err
 	}
 
-	claimClass := v1helper.GetPersistentVolumeClaimClass(pvc)
-	if claimClass == "" {
-		klog.V(4).Infof("volume expansion is disabled for PVC without StorageClasses: %s", util.ClaimToClaimKey(pvc))
-		return nil
-	}
-
-	class, err := expc.classLister.Get(claimClass)
-	if err != nil {
-		klog.V(4).Infof("failed to expand PVC: %s with error: %v", util.ClaimToClaimKey(pvc), err)
-		return nil
-	}
-
-	volumeResizerName := class.Provisioner
 	volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
 	migratable, err := expc.csiMigratedPluginManager.IsMigratable(volumeSpec)
 	if err != nil {
@@ -257,9 +234,15 @@ func (expc *expandController) syncHandler(key string) error {
 	}
 	// handle CSI migration scenarios before invoking FindExpandablePluginBySpec for in-tree
 	if migratable {
-		msg := fmt.Sprintf("CSI migration enabled for %s; waiting for external resizer to expand the pvc", volumeResizerName)
+		inTreePluginName, err := expc.csiMigratedPluginManager.GetInTreePluginNameFromSpec(volumeSpec.PersistentVolume, volumeSpec.Volume)
+		if err != nil {
+			klog.V(4).Infof("Error getting in-tree plugin name from persistent volume %s: %v", volumeSpec.PersistentVolume.Name, err)
+			return err
+		}
+
+		msg := fmt.Sprintf("CSI migration enabled for %s; waiting for external resizer to expand the pvc", inTreePluginName)
 		expc.recorder.Event(pvc, v1.EventTypeNormal, events.ExternalExpanding, msg)
-		csiResizerName, err := expc.translator.GetCSINameFromInTreeName(class.Provisioner)
+		csiResizerName, err := expc.translator.GetCSINameFromInTreeName(inTreePluginName)
 		if err != nil {
 			errorMsg := fmt.Sprintf("error getting CSI driver name for pvc %s, with error %v", util.ClaimToClaimKey(pvc), err)
 			expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
@@ -290,6 +273,7 @@ func (expc *expandController) syncHandler(key string) error {
 		return nil
 	}
 
+	volumeResizerName := volumePlugin.GetPluginName()
 	return expc.expand(pvc, pv, volumeResizerName)
 }
 
@@ -319,7 +303,7 @@ func (expc *expandController) Run(stopCh <-chan struct{}) {
 	klog.Infof("Starting expand controller")
 	defer klog.Infof("Shutting down expand controller")
 
-	if !cache.WaitForNamedCacheSync("expand", stopCh, expc.pvcsSynced, expc.pvSynced, expc.classListerSynced) {
+	if !cache.WaitForNamedCacheSync("expand", stopCh, expc.pvcsSynced, expc.pvSynced) {
 		return
 	}
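The two proxy hunks below harden LoadBalancerSourceRanges handling. service.go now trims whitespace from every entry, and proxier.go stops assuming entries were pre-validated: the parse error is logged instead of discarded, where the old code could dereference a nil *net.IPNet. A standalone illustration (the sample CIDR is hypothetical):

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	src := " 10.180.0.0/16" // annotation-sourced values can carry stray spaces
	if _, _, err := net.ParseCIDR(src); err != nil {
		// The old proxier ignored this error and then used the nil network.
		fmt.Println("unparsed:", err)
	}
	if _, cidr, err := net.ParseCIDR(strings.TrimSpace(src)); err == nil {
		fmt.Println("parsed:", cidr) // 10.180.0.0/16
	}
}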
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go
index acabff676c..57b80c1b25 100644
--- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go
+++ b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go
@@ -1161,9 +1161,10 @@ func (proxier *Proxier) syncProxyRules() {
 			allowFromNode := false
 			for _, src := range svcInfo.LoadBalancerSourceRanges() {
 				writeLine(proxier.natRules, append(args, "-s", src, "-j", string(chosenChain))...)
-				// ignore error because it has been validated
-				_, cidr, _ := net.ParseCIDR(src)
-				if cidr.Contains(proxier.nodeIP) {
+				_, cidr, err := net.ParseCIDR(src)
+				if err != nil {
+					klog.Errorf("Error parsing %s CIDR in LoadBalancerSourceRanges, dropping: %v", cidr, err)
+				} else if cidr.Contains(proxier.nodeIP) {
 					allowFromNode = true
 				}
 			}
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/service.go b/vendor/k8s.io/kubernetes/pkg/proxy/service.go
index 7fb28b5280..d946f01d00 100644
--- a/vendor/k8s.io/kubernetes/pkg/proxy/service.go
+++ b/vendor/k8s.io/kubernetes/pkg/proxy/service.go
@@ -146,10 +146,14 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
 		topologyKeys: service.Spec.TopologyKeys,
 	}
 
+	loadBalancerSourceRanges := make([]string, len(service.Spec.LoadBalancerSourceRanges))
+	for i, sourceRange := range service.Spec.LoadBalancerSourceRanges {
+		loadBalancerSourceRanges[i] = strings.TrimSpace(sourceRange)
+	}
+
 	if sct.isIPv6Mode == nil {
 		info.externalIPs = make([]string, len(service.Spec.ExternalIPs))
-		info.loadBalancerSourceRanges = make([]string, len(service.Spec.LoadBalancerSourceRanges))
-		copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
+		info.loadBalancerSourceRanges = loadBalancerSourceRanges
 		copy(info.externalIPs, service.Spec.ExternalIPs)
 		// Deep-copy in case the service instance changes
 		info.loadBalancerStatus = *service.Status.LoadBalancer.DeepCopy()
@@ -162,7 +166,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
 		if len(incorrectIPs) > 0 {
 			utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "externalIPs", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
 		}
-		info.loadBalancerSourceRanges, incorrectIPs = utilproxy.FilterIncorrectCIDRVersion(service.Spec.LoadBalancerSourceRanges, *sct.isIPv6Mode)
+		info.loadBalancerSourceRanges, incorrectIPs = utilproxy.FilterIncorrectCIDRVersion(loadBalancerSourceRanges, *sct.isIPv6Mode)
 		if len(incorrectIPs) > 0 {
 			utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "loadBalancerSourceRanges", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
 		}
diff --git a/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go b/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go
index 37575af570..f8d6d47807 100644
--- a/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go
+++ b/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go
@@ -41,6 +41,7 @@ func NewHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[schema.GroupReso
 	watchCacheSizes[schema.GroupResource{Resource: "pods"}] = maxInt(50*clusterSize, 1000)
 	watchCacheSizes[schema.GroupResource{Resource: "services"}] = maxInt(5*clusterSize, 1000)
 	watchCacheSizes[schema.GroupResource{Resource: "events"}] = 0
+	watchCacheSizes[schema.GroupResource{Resource: "events", Group: "events.k8s.io"}] = 0
 	watchCacheSizes[schema.GroupResource{Resource: "apiservices", Group: "apiregistration.k8s.io"}] = maxInt(5*clusterSize, 1000)
 	watchCacheSizes[schema.GroupResource{Resource: "leases", Group: "coordination.k8s.io"}] = maxInt(5*clusterSize, 1000)
 	return watchCacheSizes
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/BUILD b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/BUILD
index f049b38387..ae73db5f0f 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/BUILD
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/BUILD
@@ -67,6 +67,7 @@ go_test(
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate:go_default_library",
        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go
index 448d47f309..f4a9dbbe84 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go
@@ -294,7 +294,7 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C
 	}
 
 	// However, "empty" preFilterState is legit which tolerates every toSchedule Pod.
-	if len(s.TpPairToMatchNum) == 0 || len(s.Constraints) == 0 {
+	if len(s.Constraints) == 0 {
 		return nil
 	}
 
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go b/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go
index 22dc175951..6151ebd865 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go
@@ -252,8 +252,10 @@ func (r *Resource) SetMaxResource(rl v1.ResourceList) {
 				r.MilliCPU = cpu
 			}
 		case v1.ResourceEphemeralStorage:
-			if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
-				r.EphemeralStorage = ephemeralStorage
+			if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+				if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
+					r.EphemeralStorage = ephemeralStorage
+				}
 			}
 		default:
 			if v1helper.IsScalarResourceName(rName) {
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go
index 3516cfc405..729fe65529 100644
--- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go
+++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go
@@ -284,8 +284,17 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
 }
 
 // UnmountDevice unmounts the volume on the node
-func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
-	err := mount.CleanupMountPoint(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()), false)
+func (d *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
+	if runtime.GOOS == "windows" {
+		// Flush data cache for windows because it does not do so automatically during unmount device
+		exec := d.plugin.host.GetExec(d.plugin.GetPluginName())
+		err := util.WriteVolumeCache(deviceMountPath, exec)
+		if err != nil {
+			return err
+		}
+	}
+
+	err := mount.CleanupMountPoint(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()), false)
 	if err == nil {
 		klog.V(2).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
 	} else {
diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go
index e5e0c98614..ecc8a1e538 100644
--- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go
+++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go
@@ -20,6 +20,7 @@ package azure
 
 import (
 	"net/http"
+	"regexp"
 	"strings"
 
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
@@ -42,6 +43,12 @@ const (
 	// operationCancledErrorMessage means the operation is canceled by another new operation.
 	operationCancledErrorMessage = "canceledandsupersededduetoanotheroperation"
+
+	referencedResourceNotProvisionedMessageCode = "ReferencedResourceNotProvisioned"
+)
+
+var (
+	pipErrorMessageRE = regexp.MustCompile(`(?:.*)/subscriptions/(?:.*)/resourceGroups/(.*)/providers/Microsoft.Network/publicIPAddresses/([^\s]+)(?:.*)`)
 )
 
 // RequestBackoff if backoff is disabled in cloud provider it
@@ -193,12 +200,39 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer)
 			klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", *lb.Name)
 			az.lbCache.Delete(*lb.Name)
 		}
+
+		retryErrorMessage := rerr.Error().Error()
 		// Invalidate the cache because another new operation has canceled the current request.
 		if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCancledErrorMessage) {
 			klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", *lb.Name)
 			az.lbCache.Delete(*lb.Name)
 		}
+
+		// The LB update may fail because the referenced PIP is not in the Succeeded provisioning state
+		if strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(referencedResourceNotProvisionedMessageCode)) {
+			matches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage)
+			if len(matches) != 3 {
+				klog.Warningf("Failed to parse the retry error message %s", retryErrorMessage)
+				return rerr.Error()
+			}
+			pipRG, pipName := matches[1], matches[2]
+			klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, to.String(lb.Name))
+			pip, _, err := az.getPublicIPAddress(pipRG, pipName)
+			if err != nil {
+				klog.Warningf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err)
+				return rerr.Error()
+			}
+			// Perform a dummy update to fix the provisioning state
+			err = az.CreateOrUpdatePIP(service, pipRG, pip)
+			if err != nil {
+				klog.Warningf("Failed to update the public IP %s in resource group %s: %v", pipName, pipRG, err)
+				return rerr.Error()
+			}
+			// Invalidate the LB cache, return the error, and the controller manager
+			// would retry the LB update in the next reconcile loop
+			az.lbCache.Delete(*lb.Name)
+		}
+
 		return rerr.Error()
 	}
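The azure_controller_common.go hunk below enforces Azure's documented limit that host caching is unsupported on disks of 4 TiB and larger: AttachDisk now forces the caching mode to None (with a warning) once the disk size reaches diskCachingLimit. The guard, sketched with a plain string standing in for the compute.CachingTypes type:

package main

import "fmt"

const diskCachingLimit = 4096 // GiB; host caching stops at 4 TiB

// effectiveCachingMode mirrors the guard added in the hunk below
// ("None" stands in for compute.CachingTypesNone).
func effectiveCachingMode(requested string, sizeGiB int32) string {
	if sizeGiB >= diskCachingLimit && requested != "None" {
		return "None"
	}
	return requested
}

func main() {
	fmt.Println(effectiveCachingMode("ReadOnly", 8192)) // None
	fmt.Println(effectiveCachingMode("ReadOnly", 1024)) // ReadOnly
}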
this allows 100 for everything else including stand alone disks maxDisksPerStorageAccounts = 60 storageAccountUtilizationBeforeGrowing = 0.5 + // Disk Caching is not supported for disks 4 TiB and larger + // https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching + diskCachingLimit = 4096 // GiB maxLUN = 64 // max number of LUNs per VM errLeaseFailed = "AcquireDiskLeaseFailed" @@ -156,10 +159,21 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri return -1, danglingErr } - if disk.DiskProperties != nil && disk.DiskProperties.Encryption != nil && - disk.DiskProperties.Encryption.DiskEncryptionSetID != nil { - diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID + if disk.DiskProperties != nil { + if disk.DiskProperties.DiskSizeGB != nil && *disk.DiskProperties.DiskSizeGB >= diskCachingLimit && cachingMode != compute.CachingTypesNone { + // Disk Caching is not supported for disks 4 TiB and larger + // https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching + cachingMode = compute.CachingTypesNone + klog.Warningf("size of disk(%s) is %dGB which is bigger than limit(%dGB), set cacheMode as None", + diskURI, *disk.DiskProperties.DiskSizeGB, diskCachingLimit) + } + + if disk.DiskProperties.Encryption != nil && + disk.DiskProperties.Encryption.DiskEncryptionSetID != nil { + diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID + } } + if v, ok := disk.Tags[WriteAcceleratorEnabled]; ok { if v != nil && strings.EqualFold(*v, "true") { writeAcceleratorEnabled = true diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go index f2e33f97bd..fc5494b115 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go @@ -433,7 +433,11 @@ func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) { return nil, err } if len(foundInstances) != len(names) { - return nil, cloudprovider.InstanceNotFound + if len(foundInstances) == 0 { + // return error so the TargetPool nodecount does not drop to 0 unexpectedly. + return nil, cloudprovider.InstanceNotFound + } + klog.Warningf("getFoundInstanceByNames - input instances %d, found %d. 
diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go
index f2e33f97bd..fc5494b115 100644
--- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go
+++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go
@@ -433,7 +433,11 @@ func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) {
 		return nil, err
 	}
 	if len(foundInstances) != len(names) {
-		return nil, cloudprovider.InstanceNotFound
+		if len(foundInstances) == 0 {
+			// return error so the TargetPool nodecount does not drop to 0 unexpectedly.
+			return nil, cloudprovider.InstanceNotFound
+		}
+		klog.Warningf("getFoundInstanceByNames - input instances %d, found %d. Continuing LoadBalancer Update", len(names), len(foundInstances))
 	}
 	return foundInstances, nil
 }
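Note on the gce_instances.go change: getInstancesByNames used to return cloudprovider.InstanceNotFound whenever any requested name was missing, which could unexpectedly drop a TargetPool's node count to zero during a LoadBalancer update. The patch returns that error only when no instances were found at all, and otherwise logs and continues with the partial result. A rough sketch of that policy in isolation; filterFound and errInstanceNotFound are hypothetical stand-ins for the real helpers:

package main

import (
	"errors"
	"fmt"
	"log"
)

// Stand-in for cloudprovider.InstanceNotFound.
var errInstanceNotFound = errors.New("instance not found")

// filterFound fails only when none of the requested instances exist;
// a partial result is logged and returned, mirroring the patched logic.
func filterFound(names []string, exists map[string]bool) ([]string, error) {
	found := make([]string, 0, len(names))
	for _, n := range names {
		if exists[n] {
			found = append(found, n)
		}
	}
	if len(found) != len(names) {
		if len(found) == 0 {
			// Returning an error keeps the TargetPool node count from dropping to 0 unexpectedly.
			return nil, errInstanceNotFound
		}
		log.Printf("input instances %d, found %d. Continuing LoadBalancer Update", len(names), len(found))
	}
	return found, nil
}

func main() {
	got, err := filterFound([]string{"node-a", "node-b"}, map[string]bool{"node-a": true})
	fmt.Println(got, err) // [node-a] <nil>
}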
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3c05da3774..8d9688a7a8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1141,7 +1141,7 @@ gopkg.in/square/go-jose.v2/jwt
 gopkg.in/warnings.v0
 # gopkg.in/yaml.v2 v2.2.8
 gopkg.in/yaml.v2
-# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1
+# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.12-k3s1
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
 k8s.io/api/admissionregistration/v1
@@ -1185,7 +1185,7 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1
+# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.12-k3s1
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@@ -1225,7 +1225,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
 k8s.io/apiextensions-apiserver/pkg/registry/customresource
 k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
 k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
-# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1
+# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.12-k3s1
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta
@@ -1287,7 +1287,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1
+# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.12-k3s1
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
 k8s.io/apiserver/pkg/admission/initializer
@@ -1417,7 +1417,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
 k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1
+# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.12-k3s1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/kustomize
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@@ -1430,7 +1430,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.12-k3s1
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/cached
 k8s.io/client-go/discovery/cached/disk
@@ -1664,7 +1664,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1
+# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.12-k3s1
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
 k8s.io/cloud-provider/node/helpers
@@ -1672,13 +1672,13 @@ k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.12-k3s1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1
+# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.12-k3s1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -1693,7 +1693,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1
+# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.12-k3s1
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
 k8s.io/component-base/codec
@@ -1711,10 +1711,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
 k8s.io/component-base/metrics/testutil
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1
+# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.12-k3s1
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.12-k3s1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@@ -1729,7 +1729,7 @@ k8s.io/gengo/types
 k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1
+# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.12-k3s1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -1757,7 +1757,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.12-k3s1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
 k8s.io/kube-openapi/pkg/aggregator
@@ -1768,14 +1768,14 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1
+# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.12-k3s1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1
+# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.12-k3s1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1alpha1
 k8s.io/kube-scheduler/config/v1alpha2
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1
+# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.12-k3s1
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
 k8s.io/kubectl/pkg/cmd/annotate
@@ -1850,11 +1850,11 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1
+# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.12-k3s1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
 k8s.io/kubelet/pkg/apis/pluginregistration/v1
-# k8s.io/kubernetes v1.18.10 => github.com/rancher/kubernetes v1.18.10-k3s1
+# k8s.io/kubernetes v1.18.12 => github.com/rancher/kubernetes v1.18.12-k3s1
 k8s.io/kubernetes/cmd/cloud-controller-manager/app
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@@ -2597,7 +2597,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
 k8s.io/kubernetes/third_party/forked/ipvs
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.12-k3s1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -2628,7 +2628,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1
+# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.12-k3s1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2