Update to v1.18.12 (#2512)

* Update to v1.18.12
Brian Downs 2020-11-12 15:44:50 -07:00 committed by GitHub
parent 507a5e6f77
commit 0bd92448b0
28 changed files with 298 additions and 188 deletions

go.mod

@@ -33,31 +33,31 @@ replace (
 	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1
+	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.12-k3s1
-	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1
+	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.12-k3s1
-	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1
+	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.12-k3s1
-	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1
+	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.12-k3s1
-	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1
+	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.12-k3s1
-	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1
+	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.12-k3s1
-	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1
+	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.12-k3s1
-	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1
+	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.12-k3s1
-	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1
+	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.12-k3s1
-	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1
+	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.12-k3s1
-	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1
+	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.12-k3s1
-	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1
+	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.12-k3s1
-	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1
+	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.12-k3s1
-	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1
+	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.12-k3s1
-	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1
+	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.12-k3s1
-	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1
+	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.12-k3s1
-	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1
+	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.12-k3s1
-	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1
+	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.12-k3s1
-	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.10-k3s1
+	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.12-k3s1
-	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1
+	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.12-k3s1
-	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1
+	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.12-k3s1
-	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.10-k3s1
+	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.12-k3s1
-	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.10-k3s1
+	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.12-k3s1
-	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.10-k3s1
+	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.12-k3s1
-	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.10-k3s1
+	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.12-k3s1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 )
@@ -123,5 +123,5 @@ require (
 	k8s.io/component-base v0.0.0
 	k8s.io/cri-api v0.0.0
 	k8s.io/klog v1.0.0
-	k8s.io/kubernetes v1.18.10
+	k8s.io/kubernetes v1.18.12
 )

go.sum

@@ -638,49 +638,49 @@ github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd h1:KPnQ
 github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd/go.mod h1:QYmg8cqWPPfIbpEuhtJbEdWwA6PEKSY016Z6EdfL9+8=
 github.com/rancher/kine v0.5.0 h1:ot9ZInMCb0482aWfvO+3gI2B+e9vGxpY12EDJgpowiY=
 github.com/rancher/kine v0.5.0/go.mod h1:NoqDMfN0Q+Wu23Kk3MfXfgLO2fE6abLaetejZs9HAYo=
-github.com/rancher/kubernetes v1.18.10-k3s1 h1:RS7DmQf0U/iNhGO0MIvfmzycCowMSvUQhwZNVdydWm8=
+github.com/rancher/kubernetes v1.18.12-k3s1 h1:+hXEKCwvMFGPNNyvFIOPN52bJc8xcer6VZ7fz5Nu/OI=
-github.com/rancher/kubernetes v1.18.10-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
+github.com/rancher/kubernetes v1.18.12-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1 h1:tZfjkh+JjJ3omi19P8caLdN9ql5Ftpk0tWAmJh4Bo5E=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.12-k3s1 h1:NaSzl7vG93KrUElE1CHI9lxIO9uDAwEHo8VsV0WJ598=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.12-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1 h1:Isl/Gi7wCyZ31Hpkw+o3SAYx+SP9VcL5Udbwndq5MfE=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.12-k3s1 h1:261dtMIau64TfXX8FfFUr1WDOVF5uxzQreRleYNMCZM=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.12-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1 h1:zXR4Em0Og8HoZU14b6gMb74n9JUMYd2pzzN8tFvHniw=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.12-k3s1 h1:bTWwCfpUL53+NelTSKZAR54pa/a/OuUbnx/86filFGE=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.12-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1 h1:v6Yeydb3yeJB6+MQVGGjIOQcYWkHpfr1WdNy7N+9kdg=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.12-k3s1 h1:rOkUH/dHox1xwoced6boi6MVk18JdXeFb4YF94wjMOk=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.12-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1 h1:OU2/souymetniKHDb8S6RmrXVsBV/WJuY9spVQBb+Dc=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.12-k3s1 h1:ObqU/bJNW6NySSt1kv1BjkRm0aIl6umospoDUAoBZ3k=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.12-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1 h1:4/FsynISjNWhC+c0JVFawCnKtM2Q4jzvP6xaVIG1u6o=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.12-k3s1 h1:tCj6ux06rqFidCjl8NupD2RKUfz7uZypaekyuFXXDSM=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.12-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1 h1:+GhEM4DXe0BMWEcss49rNkFLHp032Ybt/FfIjtjiAqY=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.12-k3s1 h1:Co3XSVcxjaCIN/45D7VpTWc0s+IM6SsyzFERQZVzA7Q=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.12-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1 h1:/GzhswAi+6qxukCTwLlPKLPup2xcZ1ZxM0RI525do/o=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.12-k3s1 h1:HTIKccHPSLlxGV4yByEPnkQd5A67SgmtXnr8EJXjz0k=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.12-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1 h1:OjdqC+CK/kcK0Lq3LX2MCBSxzXc0yDNS9UqwDoRLgxU=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.12-k3s1 h1:c0zH8CS14wzAtM3XzKLBvSM/wXTQT8Y2wQle2IWrmtI=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.12-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1 h1:5XsuCxrfl1s08MRm+4i98l4fsCW9KbAFdGXV+x3Anqw=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.12-k3s1 h1:BKWjjDMya/0zJJ6ls21uD5qUvQZxhIN47q/jRskw624=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.12-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1 h1:dFGSPzS/pc3yjJ50dafrybu9tzjuWXX/qxngAWgQT5g=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.12-k3s1 h1:RzfQEpY8x31OPPOIb98zyExJur/BVVHpYTqMKRQ8Bq4=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.12-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1 h1:siigEKE3XiGkwmD0fVWE26l+xiFrCZv+xfvTttheHyE=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.12-k3s1 h1:DJV1i7LO/khqvWwavsG8qnVKZorBT9iZh1q0klsOp4g=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.12-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1 h1:uPfAAj4yin/8X3j63jxyLqR8qpM7Zo6yD3zfy4leifI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.12-k3s1 h1:ODU3EqkMHbbXE+bwioJEHopt48dQwWkG0sZjxregvGg=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.12-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1 h1:DIA6KBLS1vRYYT9S3XZXPn8Q82g+AvYE9LJcb9rwRfM=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.12-k3s1 h1:Bdx5Ap+HMi+7O4CvocUwCAU7fKKI8+v7dc4JWhGYga4=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.12-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1 h1:UBMPWPH3k09FyAKPMBrS5FG9j6e7CAZrouSOFgQLK2A=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.12-k3s1 h1:Q5XkzB1+5rg33XjdIvKMXBuGozL3ABtvbYx/5HaTV3c=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.12-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1 h1:OdBE36l4S/XNpxI6OgFkVPZGuAjB1ePecuCIpSDkA0o=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.12-k3s1 h1:SbQPyh8XzwK4A01eW6FQ+56X/HOvStrWUjlReiTjfnI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.12-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1 h1:aTvmtU9ML9ME/hk1xph6MSpa7hwR4w2I1wkWcYWPqj4=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.12-k3s1 h1:Aq+xR7++BnhFw113x9bJLny4uVsrJ6XMYPiTDamNskA=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.12-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1 h1:+uo+XJdwIT74zIHV7Rz7r4BtnZU0XO1u7C0N5xIEJrs=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.12-k3s1 h1:6PYKx4M1KAATwdcUEBGW6l0nFiVcCzBPToLZTPec3Vo=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.12-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1 h1:S0KK6mxYic3gfzLR/EE7+BByZ64dOVo7j0PizDehcIY=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.12-k3s1 h1:FeodK0hfR3zulgYpAlE3Dd6WaoKElhRWmVU52gszuuo=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.12-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1 h1:LtO8DDrNAzK113XtUrZEvFTfZ4WxaGULIVtOkxVuAJQ=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.12-k3s1 h1:cp08nQTd18HvKEmNXC5+u0kaaZxsanjoy/JhVGwHjoo=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.12-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
-github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.10-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
+github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.12-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
 github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
 github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
 github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=


@@ -114,6 +114,18 @@ func negotiateProtocol(clientProtocols, serverProtocols []string) string {
 	return ""
 }
 
+func commaSeparatedHeaderValues(header []string) []string {
+	var parsedClientProtocols []string
+	for i := range header {
+		for _, clientProtocol := range strings.Split(header[i], ",") {
+			if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 {
+				parsedClientProtocols = append(parsedClientProtocols, proto)
+			}
+		}
+	}
+	return parsedClientProtocols
+}
+
 // Handshake performs a subprotocol negotiation. If the client did request a
 // subprotocol, Handshake will select the first common value found in
 // serverProtocols. If a match is found, Handshake adds a response header
@@ -121,7 +133,7 @@ func negotiateProtocol(clientProtocols, serverProtocols []string) string {
 // returned, along with a response header containing the list of protocols the
 // server can accept.
 func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
-	clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]
+	clientProtocols := commaSeparatedHeaderValues(req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)])
 	if len(clientProtocols) == 0 {
 		return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
 	}
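For illustration, here is a minimal standalone sketch of the header splitting that the new commaSeparatedHeaderValues helper performs; the function body is copied from the hunk above, while the main function and the sample protocol strings are only assumptions for the example.

package main

import (
	"fmt"
	"strings"
)

// commaSeparatedHeaderValues splits each header value on commas and trims
// surrounding spaces, so one "a, b" header line behaves like two header lines.
func commaSeparatedHeaderValues(header []string) []string {
	var parsedClientProtocols []string
	for i := range header {
		for _, clientProtocol := range strings.Split(header[i], ",") {
			if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 {
				parsedClientProtocols = append(parsedClientProtocols, proto)
			}
		}
	}
	return parsedClientProtocols
}

func main() {
	// A client may send its acceptable subprotocols as one comma-separated header value.
	values := []string{"v4.channel.k8s.io, v3.channel.k8s.io", "v2.channel.k8s.io"}
	fmt.Println(commaSeparatedHeaderValues(values))
	// Output: [v4.channel.k8s.io v3.channel.k8s.io v2.channel.k8s.io]
}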


@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion = "v1.18.10-k3s1"
+	gitVersion = "v1.18.12-k3s1"
-	gitCommit = "c8d808cfc3c2c00c8d542b84e368ca439987e352"
+	gitCommit = "60c04eedb8755effb61deafd2cc43f4cf44aeee4"
 	gitTreeState = "clean"
-	buildDate = "2020-10-15T18:25:57Z"
+	buildDate = "2020-11-12T20:51:09Z"
 )


@@ -289,8 +289,12 @@ func (le *LeaderElector) release() bool {
 	if !le.IsLeader() {
 		return true
 	}
+	now := metav1.Now()
 	leaderElectionRecord := rl.LeaderElectionRecord{
 		LeaderTransitions: le.observedRecord.LeaderTransitions,
+		LeaseDurationSeconds: 1,
+		RenewTime:            now,
+		AcquireTime:          now,
 	}
 	if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil {
 		klog.Errorf("Failed to release lock: %v", err)


@@ -92,8 +92,12 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord)
 		cml.cm.Annotations = make(map[string]string)
 	}
 	cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-	cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
-	return err
+	cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	cml.cm = cm
+	return nil
 }
 
 // RecordEvent in leader election while adding meta-data


@@ -87,8 +87,12 @@ func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) e
 		el.e.Annotations = make(map[string]string)
 	}
 	el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-	el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
-	return err
+	e, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	el.e = e
+	return nil
 }
 
 // RecordEvent in leader election while adding meta-data


@@ -71,9 +71,14 @@ func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error
 		return errors.New("lease not initialized, call get or create first")
 	}
 	ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
-	var err error
-	ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
-	return err
+	lease, err := ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	ll.lease = lease
+	return nil
 }
 
 // RecordEvent in leader election while adding meta-data
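All three resource-lock flavors (ConfigMap, Endpoints, Lease) receive the same treatment: the cached object is replaced only when the API update succeeds, so a failed write can no longer clobber the locally cached record. A minimal standalone sketch of the pattern, using hypothetical stand-in types rather than client-go ones:

package main

import (
	"errors"
	"fmt"
)

// record stands in for the object a resource lock caches between renewals.
type record struct{ holder string }

// lock caches the last object returned by the server; update stands in for the client call.
type lock struct {
	cached *record
	update func(*record) (*record, error)
}

// Update mirrors the fixed pattern: keep the previously cached object on failure.
func (l *lock) Update(r *record) error {
	updated, err := l.update(r)
	if err != nil {
		return err
	}
	l.cached = updated
	return nil
}

func main() {
	l := &lock{
		cached: &record{holder: "node-a"},
		update: func(*record) (*record, error) { return nil, errors.New("conflict") },
	}
	_ = l.Update(&record{holder: "node-b"})
	fmt.Println(l.cached.holder) // still "node-a": the failed update did not wipe the cache
}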


@@ -47,10 +47,8 @@ type tlsCacheKey struct {
 	keyData string
 	certFile string
 	keyFile string
-	getCert string
 	serverName string
 	nextProtos string
-	dial string
 	disableCompression bool
 }
 
@@ -59,22 +57,24 @@ func (t tlsCacheKey) String() string {
 	if len(t.keyData) > 0 {
 		keyText = "<redacted>"
 	}
-	return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial, t.disableCompression)
+	return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.serverName, t.disableCompression)
 }
 
 func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
-	key, err := tlsConfigKey(config)
+	key, canCache, err := tlsConfigKey(config)
 	if err != nil {
 		return nil, err
 	}
 
-	// Ensure we only create a single transport for the given TLS options
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	// See if we already have a custom transport for this config
-	if t, ok := c.transports[key]; ok {
-		return t, nil
-	}
+	if canCache {
+		// Ensure we only create a single transport for the given TLS options
+		c.mu.Lock()
+		defer c.mu.Unlock()
+
+		// See if we already have a custom transport for this config
+		if t, ok := c.transports[key]; ok {
+			return t, nil
+		}
+	}
 
 	// Get the TLS options for this client config
@@ -104,8 +104,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
 		go dynamicCertDialer.Run(wait.NeverStop)
 	}
 
-	// Cache a single transport for these options
-	c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
+	transport := utilnet.SetTransportDefaults(&http.Transport{
 		Proxy: http.ProxyFromEnvironment,
 		TLSHandshakeTimeout: 10 * time.Second,
 		TLSClientConfig: tlsConfig,
@@ -113,22 +112,32 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
 		DialContext: dial,
 		DisableCompression: config.DisableCompression,
 	})
-	return c.transports[key], nil
+
+	if canCache {
+		// Cache a single transport for these options
+		c.transports[key] = transport
+	}
+
+	return transport, nil
 }
 
 // tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor
-func tlsConfigKey(c *Config) (tlsCacheKey, error) {
+func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) {
 	// Make sure ca/key/cert content is loaded
 	if err := loadTLSFiles(c); err != nil {
-		return tlsCacheKey{}, err
+		return tlsCacheKey{}, false, err
 	}
+
+	if c.TLS.GetCert != nil || c.Dial != nil {
+		// cannot determine equality for functions
+		return tlsCacheKey{}, false, nil
+	}
+
 	k := tlsCacheKey{
 		insecure: c.TLS.Insecure,
 		caData: string(c.TLS.CAData),
-		getCert: fmt.Sprintf("%p", c.TLS.GetCert),
 		serverName: c.TLS.ServerName,
 		nextProtos: strings.Join(c.TLS.NextProtos, ","),
-		dial: fmt.Sprintf("%p", c.Dial),
 		disableCompression: c.DisableCompression,
 	}
 
@@ -140,5 +149,5 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) {
 		k.keyData = string(c.TLS.KeyData)
 	}
 
-	return k, nil
+	return k, true, nil
 }
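The old cache keyed transports partly on formatted pointers of the GetCert and Dial callbacks, but function values have no reliable equality, so such configs can neither be deduplicated nor safely looked up. The change therefore skips the cache entirely when either callback is set. A small standalone sketch of that control flow, with hypothetical stand-in types rather than client-go's Config and tlsTransportCache:

package main

import "fmt"

// config stands in for transport.Config: dial holds a function, which cannot
// be compared for equality, so no cache key can safely represent it.
type config struct {
	serverName string
	dial       func() error
}

type cache struct {
	transports map[string]string // key -> stand-in for a cached *http.Transport
}

// key reports whether the config can be cached and, if so, under which key.
func key(c config) (string, bool) {
	if c.dial != nil {
		return "", false // cannot determine equality for functions
	}
	return "serverName:" + c.serverName, true
}

func (ca *cache) get(c config) string {
	k, canCache := key(c)
	if canCache {
		if t, ok := ca.transports[k]; ok {
			return t // reuse the cached transport
		}
	}
	t := "transport-for-" + c.serverName // stand-in for building a new transport
	if canCache {
		ca.transports[k] = t
	}
	return t
}

func main() {
	ca := &cache{transports: map[string]string{}}
	ca.get(config{serverName: "kubernetes.default"})
	fmt.Println(len(ca.transports)) // 1: cacheable config stored once
	ca.get(config{serverName: "kubernetes.default", dial: func() error { return nil }})
	fmt.Println(len(ca.transports)) // still 1: configs carrying functions bypass the cache
}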


@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion = "v1.18.10-k3s1"
+	gitVersion = "v1.18.12-k3s1"
-	gitCommit = "c8d808cfc3c2c00c8d542b84e368ca439987e352"
+	gitCommit = "60c04eedb8755effb61deafd2cc43f4cf44aeee4"
 	gitTreeState = "clean"
-	buildDate = "2020-10-15T18:25:57Z"
+	buildDate = "2020-11-12T20:51:09Z"
 )


@@ -41,6 +41,8 @@ const (
 	secretNameTemplate = "azure-storage-account-%s-secret"
 	defaultSecretNamespace = "default"
+
+	resourceGroupAnnotation = "kubernetes.io/azure-file-resource-group"
 )
 
 var _ InTreePlugin = &azureFileCSITranslator{}
@@ -116,7 +118,13 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
 		klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err)
 		accountName = azureSource.SecretName
 	}
-	volumeID := fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, "")
+	resourceGroup := ""
+	if pv.ObjectMeta.Annotations != nil {
+		if v, ok := pv.ObjectMeta.Annotations[resourceGroupAnnotation]; ok {
+			resourceGroup = v
+		}
+	}
+	volumeID := fmt.Sprintf(volumeIDTemplate, resourceGroup, accountName, azureSource.ShareName, "")
 
 	var (
 		// refer to https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
@@ -155,6 +163,7 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
 		ReadOnly: csiSource.ReadOnly,
 	}
 
+	resourceGroup := ""
 	if csiSource.NodeStageSecretRef != nil && csiSource.NodeStageSecretRef.Name != "" {
 		azureSource.SecretName = csiSource.NodeStageSecretRef.Name
 		azureSource.SecretNamespace = &csiSource.NodeStageSecretRef.Namespace
@@ -164,16 +173,23 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
 			}
 		}
 	} else {
-		_, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
+		rg, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
 		if err != nil {
 			return nil, err
 		}
 		azureSource.ShareName = fileShareName
 		azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
+		resourceGroup = rg
 	}
 
 	pv.Spec.CSI = nil
 	pv.Spec.AzureFile = azureSource
+	if resourceGroup != "" {
+		if pv.ObjectMeta.Annotations == nil {
+			pv.ObjectMeta.Annotations = map[string]string{}
+		}
+		pv.ObjectMeta.Annotations[resourceGroupAnnotation] = resourceGroup
+	}
+
 	return pv, nil
 }


@@ -318,7 +318,6 @@ func (d *Helper) evictPods(pods []corev1.Pod, policyGroupVersion string, getPodF
 				if err != nil {
 					errors = append(errors, err)
 				}
-			default:
 			}
 		}


@@ -358,7 +358,6 @@ func startVolumeExpandController(ctx ControllerContext) (http.Handler, bool, err
 			ctx.ClientBuilder.ClientOrDie("expand-controller"),
 			ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
 			ctx.InformerFactory.Core().V1().PersistentVolumes(),
-			ctx.InformerFactory.Storage().V1().StorageClasses(),
 			ctx.Cloud,
 			plugins,
 			csiTranslator,


@@ -998,15 +998,17 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
 		return currentReplicas // Scaling is disabled
 	} else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
+		result = math.MaxInt32
 		selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value
 	} else {
+		result = math.MinInt32
 		selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
 		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
 		periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod
 		if policy.Type == autoscalingv2.PodsScalingPolicy {
-			proposed = int32(periodStartReplicas + policy.Value)
+			proposed = periodStartReplicas + policy.Value
 		} else if policy.Type == autoscalingv2.PercentScalingPolicy {
 			// the proposal has to be rounded up because the proposed change might not increase the replica count causing the target to never scale up
 			proposed = int32(math.Ceil(float64(periodStartReplicas) * (1 + float64(policy.Value)/100)))
@@ -1018,14 +1020,16 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 
 // calculateScaleDownLimitWithBehavior returns the maximum number of pods that could be deleted for the given HPAScalingRules
 func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
-	var result int32 = math.MaxInt32
+	var result int32
 	var proposed int32
 	var selectPolicyFn func(int32, int32) int32
 	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
 		return currentReplicas // Scaling is disabled
 	} else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
+		result = math.MinInt32
 		selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value
 	} else {
+		result = math.MaxInt32
 		selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
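The seed value now matches the fold function: a min-fold must start from MaxInt32 and a max-fold from MinInt32, otherwise the starting value wins regardless of the per-policy proposals. A small standalone sketch of that arithmetic, with hypothetical proposal values and helper names, not the HPA controller code:

package main

import (
	"fmt"
	"math"
)

func minI32(a, b int32) int32 {
	if a < b {
		return a
	}
	return b
}

func maxI32(a, b int32) int32 {
	if a > b {
		return a
	}
	return b
}

// fold mirrors the per-policy loop above: each policy's proposal is combined
// with the running result through the selected policy function.
func fold(seed int32, fn func(int32, int32) int32, proposals []int32) int32 {
	result := seed
	for _, p := range proposals {
		result = fn(result, p)
	}
	return result
}

func main() {
	proposals := []int32{12, 7, 20} // hypothetical per-policy scale limits

	fmt.Println(fold(math.MaxInt32, minI32, proposals)) // 7: 'Min' select policy with the right seed
	fmt.Println(fold(math.MinInt32, maxI32, proposals)) // 20: default 'Max' select policy with the right seed
	fmt.Println(fold(math.MaxInt32, maxI32, proposals)) // 2147483647: a mismatched seed dominates the result
}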


@@ -74,8 +74,9 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
 		return 0, 0, 0, time.Time{}, fmt.Errorf("no pods returned by selector while calculating replica count")
 	}
 
-	readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
+	readyPodCount, unreadyPods, missingPods, ignoredPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
 	removeMetricsForPods(metrics, ignoredPods)
+	removeMetricsForPods(metrics, unreadyPods)
 	requests, err := calculatePodRequests(podList, resource)
 	if err != nil {
 		return 0, 0, 0, time.Time{}, err
@@ -90,7 +91,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
 		return 0, 0, 0, time.Time{}, err
 	}
 
-	rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0
+	rebalanceIgnored := len(unreadyPods) > 0 && usageRatio > 1.0
 	if !rebalanceIgnored && len(missingPods) == 0 {
 		if math.Abs(1.0-usageRatio) <= c.tolerance {
 			// return the current replicas if the change would be too small
@@ -117,7 +118,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
 	if rebalanceIgnored {
 		// on a scale-up, treat unready pods as using 0% of the resource request
-		for podName := range ignoredPods {
+		for podName := range unreadyPods {
 			metrics[podName] = metricsclient.PodMetric{Value: 0}
 		}
 	}
@@ -182,8 +183,9 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
 		return 0, 0, fmt.Errorf("no pods returned by selector while calculating replica count")
 	}
 
-	readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
+	readyPodCount, unreadyPods, missingPods, ignoredPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
 	removeMetricsForPods(metrics, ignoredPods)
+	removeMetricsForPods(metrics, unreadyPods)
 
 	if len(metrics) == 0 {
 		return 0, 0, fmt.Errorf("did not receive metrics for any ready pods")
@@ -191,7 +193,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
 	usageRatio, utilization := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization)
 
-	rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0
+	rebalanceIgnored := len(unreadyPods) > 0 && usageRatio > 1.0
 
 	if !rebalanceIgnored && len(missingPods) == 0 {
 		if math.Abs(1.0-usageRatio) <= c.tolerance {
@@ -219,7 +221,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
 	if rebalanceIgnored {
 		// on a scale-up, treat unready pods as using 0% of the resource request
-		for podName := range ignoredPods {
+		for podName := range unreadyPods {
 			metrics[podName] = metricsclient.PodMetric{Value: 0}
 		}
 	}
@@ -364,16 +366,18 @@ func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(statusReplicas int32
 	return replicaCount, utilization, timestamp, nil
 }
 
-func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, ignoredPods sets.String, missingPods sets.String) {
+func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, unreadyPods, missingPods, ignoredPods sets.String) {
 	missingPods = sets.NewString()
+	unreadyPods = sets.NewString()
 	ignoredPods = sets.NewString()
 	for _, pod := range pods {
 		if pod.DeletionTimestamp != nil || pod.Status.Phase == v1.PodFailed {
+			ignoredPods.Insert(pod.Name)
 			continue
 		}
 		// Pending pods are ignored.
 		if pod.Status.Phase == v1.PodPending {
-			ignoredPods.Insert(pod.Name)
+			unreadyPods.Insert(pod.Name)
 			continue
 		}
 		// Pods missing metrics.
@@ -384,22 +388,22 @@ func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1
 		}
 		// Unready pods are ignored.
 		if resource == v1.ResourceCPU {
-			var ignorePod bool
+			var unready bool
 			_, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
 			if condition == nil || pod.Status.StartTime == nil {
-				ignorePod = true
+				unready = true
 			} else {
 				// Pod still within possible initialisation period.
 				if pod.Status.StartTime.Add(cpuInitializationPeriod).After(time.Now()) {
 					// Ignore sample if pod is unready or one window of metric wasn't collected since last state transition.
-					ignorePod = condition.Status == v1.ConditionFalse || metric.Timestamp.Before(condition.LastTransitionTime.Time.Add(metric.Window))
+					unready = condition.Status == v1.ConditionFalse || metric.Timestamp.Before(condition.LastTransitionTime.Time.Add(metric.Window))
 				} else {
 					// Ignore metric if pod is unready and it has never been ready.
-					ignorePod = condition.Status == v1.ConditionFalse && pod.Status.StartTime.Add(delayOfInitialReadinessStatus).After(condition.LastTransitionTime.Time)
+					unready = condition.Status == v1.ConditionFalse && pod.Status.StartTime.Add(delayOfInitialReadinessStatus).After(condition.LastTransitionTime.Time)
 				}
 			}
-			if ignorePod {
-				ignoredPods.Insert(pod.Name)
+			if unready {
+				unreadyPods.Insert(pod.Name)
 				continue
 			}
 		}
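The grouping now distinguishes two cases that used to share one bucket: deleted or failed pods are ignored outright, while pending or not-yet-ready pods are tracked as unready so a scale-up can count them as using 0% of their request. A minimal sketch of those categories with stand-in types, not the HPA types (the missing-metrics bucket is omitted for brevity):

package main

import "fmt"

type podState struct {
	name            string
	deletedOrFailed bool
	pending         bool
	ready           bool
}

// group mirrors the categories returned by groupPods above: ignored pods are
// dropped from the calculation, unready pods are rebalanced as 0% usage on a
// scale-up, and everything else counts as ready.
func group(pods []podState) (ready, unready, ignored []string) {
	for _, p := range pods {
		switch {
		case p.deletedOrFailed:
			ignored = append(ignored, p.name)
		case p.pending || !p.ready:
			unready = append(unready, p.name)
		default:
			ready = append(ready, p.name)
		}
	}
	return ready, unready, ignored
}

func main() {
	pods := []podState{
		{name: "web-0", ready: true},
		{name: "web-1", pending: true},
		{name: "web-2", deletedOrFailed: true},
	}
	ready, unready, ignored := group(pods)
	fmt.Println(ready, unready, ignored) // [web-0] [web-1] [web-2]
}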


@@ -7,7 +7,6 @@ go_library(
     srcs = ["expand_controller.go"],
     importpath = "k8s.io/kubernetes/pkg/controller/volume/expand",
     deps = [
-        "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/controller/volume/events:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/csimigration:go_default_library",
@@ -22,12 +21,10 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
-        "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
@@ -68,7 +65,6 @@ go_test(
         "//pkg/volume/util/operationexecutor:go_default_library",
         "//pkg/volume/util/types:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
-        "//staging/src/k8s.io/api/storage/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",


@@ -32,18 +32,15 @@ import (
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	coreinformers "k8s.io/client-go/informers/core/v1"
-	storageclassinformer "k8s.io/client-go/informers/storage/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
-	storagelisters "k8s.io/client-go/listers/storage/v1"
 	"k8s.io/client-go/tools/cache"
 	kcache "k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	cloudprovider "k8s.io/cloud-provider"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/controller/volume/events"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/csimigration"
@@ -82,10 +79,6 @@ type expandController struct {
 	pvLister corelisters.PersistentVolumeLister
 	pvSynced kcache.InformerSynced
 
-	// storageClass lister for fetching provisioner name
-	classLister storagelisters.StorageClassLister
-	classListerSynced cache.InformerSynced
-
 	// cloud provider used by volume host
 	cloud cloudprovider.Interface
 
@@ -109,7 +102,6 @@ func NewExpandController(
 	kubeClient clientset.Interface,
 	pvcInformer coreinformers.PersistentVolumeClaimInformer,
 	pvInformer coreinformers.PersistentVolumeInformer,
-	scInformer storageclassinformer.StorageClassInformer,
 	cloud cloudprovider.Interface,
 	plugins []volume.VolumePlugin,
 	translator CSINameTranslator,
@@ -122,8 +114,6 @@ func NewExpandController(
 		pvcsSynced: pvcInformer.Informer().HasSynced,
 		pvLister: pvInformer.Lister(),
 		pvSynced: pvInformer.Informer().HasSynced,
-		classLister: scInformer.Lister(),
-		classListerSynced: scInformer.Informer().HasSynced,
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "volume_expand"),
 		translator: translator,
 		csiMigratedPluginManager: csiMigratedPluginManager,
@@ -236,19 +226,6 @@ func (expc *expandController) syncHandler(key string) error {
 		return err
 	}
 
-	claimClass := v1helper.GetPersistentVolumeClaimClass(pvc)
-	if claimClass == "" {
-		klog.V(4).Infof("volume expansion is disabled for PVC without StorageClasses: %s", util.ClaimToClaimKey(pvc))
-		return nil
-	}
-
-	class, err := expc.classLister.Get(claimClass)
-	if err != nil {
-		klog.V(4).Infof("failed to expand PVC: %s with error: %v", util.ClaimToClaimKey(pvc), err)
-		return nil
-	}
-
-	volumeResizerName := class.Provisioner
-
 	volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
 	migratable, err := expc.csiMigratedPluginManager.IsMigratable(volumeSpec)
 	if err != nil {
@@ -257,9 +234,15 @@ func (expc *expandController) syncHandler(key string) error {
 	}
 	// handle CSI migration scenarios before invoking FindExpandablePluginBySpec for in-tree
 	if migratable {
-		msg := fmt.Sprintf("CSI migration enabled for %s; waiting for external resizer to expand the pvc", volumeResizerName)
+		inTreePluginName, err := expc.csiMigratedPluginManager.GetInTreePluginNameFromSpec(volumeSpec.PersistentVolume, volumeSpec.Volume)
+		if err != nil {
+			klog.V(4).Infof("Error getting in-tree plugin name from persistent volume %s: %v", volumeSpec.PersistentVolume.Name, err)
+			return err
+		}
+
+		msg := fmt.Sprintf("CSI migration enabled for %s; waiting for external resizer to expand the pvc", inTreePluginName)
 		expc.recorder.Event(pvc, v1.EventTypeNormal, events.ExternalExpanding, msg)
-		csiResizerName, err := expc.translator.GetCSINameFromInTreeName(class.Provisioner)
+		csiResizerName, err := expc.translator.GetCSINameFromInTreeName(inTreePluginName)
 		if err != nil {
 			errorMsg := fmt.Sprintf("error getting CSI driver name for pvc %s, with error %v", util.ClaimToClaimKey(pvc), err)
 			expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
@@ -290,6 +273,7 @@ func (expc *expandController) syncHandler(key string) error {
 		return nil
 	}
 
+	volumeResizerName := volumePlugin.GetPluginName()
 	return expc.expand(pvc, pv, volumeResizerName)
 }
 
@@ -319,7 +303,7 @@ func (expc *expandController) Run(stopCh <-chan struct{}) {
 	klog.Infof("Starting expand controller")
 	defer klog.Infof("Shutting down expand controller")
 
-	if !cache.WaitForNamedCacheSync("expand", stopCh, expc.pvcsSynced, expc.pvSynced, expc.classListerSynced) {
+	if !cache.WaitForNamedCacheSync("expand", stopCh, expc.pvcsSynced, expc.pvSynced) {
 		return
 	}


@@ -1161,9 +1161,10 @@ func (proxier *Proxier) syncProxyRules() {
 			allowFromNode := false
 			for _, src := range svcInfo.LoadBalancerSourceRanges() {
 				writeLine(proxier.natRules, append(args, "-s", src, "-j", string(chosenChain))...)
-				// ignore error because it has been validated
-				_, cidr, _ := net.ParseCIDR(src)
-				if cidr.Contains(proxier.nodeIP) {
+				_, cidr, err := net.ParseCIDR(src)
+				if err != nil {
+					klog.Errorf("Error parsing %s CIDR in LoadBalancerSourceRanges, dropping: %v", cidr, err)
+				} else if cidr.Contains(proxier.nodeIP) {
 					allowFromNode = true
 				}
 			}


@@ -146,10 +146,14 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
 		topologyKeys: service.Spec.TopologyKeys,
 	}
 
+	loadBalancerSourceRanges := make([]string, len(service.Spec.LoadBalancerSourceRanges))
+	for i, sourceRange := range service.Spec.LoadBalancerSourceRanges {
+		loadBalancerSourceRanges[i] = strings.TrimSpace(sourceRange)
+	}
+
 	if sct.isIPv6Mode == nil {
 		info.externalIPs = make([]string, len(service.Spec.ExternalIPs))
-		info.loadBalancerSourceRanges = make([]string, len(service.Spec.LoadBalancerSourceRanges))
-		copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
+		info.loadBalancerSourceRanges = loadBalancerSourceRanges
 		copy(info.externalIPs, service.Spec.ExternalIPs)
 		// Deep-copy in case the service instance changes
 		info.loadBalancerStatus = *service.Status.LoadBalancer.DeepCopy()
@@ -162,7 +166,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
 		if len(incorrectIPs) > 0 {
 			utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "externalIPs", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
 		}
-		info.loadBalancerSourceRanges, incorrectIPs = utilproxy.FilterIncorrectCIDRVersion(service.Spec.LoadBalancerSourceRanges, *sct.isIPv6Mode)
+		info.loadBalancerSourceRanges, incorrectIPs = utilproxy.FilterIncorrectCIDRVersion(loadBalancerSourceRanges, *sct.isIPv6Mode)
 		if len(incorrectIPs) > 0 {
 			utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "loadBalancerSourceRanges", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
 		}
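Together with the iptables hunk above, this closes two gaps around LoadBalancerSourceRanges: a padded entry no longer trips CIDR parsing, and a malformed entry is logged and skipped rather than dereferencing a nil network. A small standalone illustration using only the standard library (the sample addresses are hypothetical, and this is not kube-proxy code):

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// API validation tolerates surrounding whitespace in a source range, but
	// net.ParseCIDR does not, so the raw value fails to parse.
	raw := " 10.0.0.0/8"
	if _, _, err := net.ParseCIDR(raw); err != nil {
		fmt.Println("raw value rejected:", err)
	}

	// Trimming first, as the new loadBalancerSourceRanges slice does, makes the
	// same entry usable for the node-IP containment check.
	_, cidr, err := net.ParseCIDR(strings.TrimSpace(raw))
	if err != nil {
		fmt.Println("unexpected parse failure:", err)
		return
	}
	nodeIP := net.ParseIP("10.1.2.3")
	fmt.Println("allow traffic from node:", cidr.Contains(nodeIP)) // true
}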


@@ -41,6 +41,7 @@ func NewHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[schema.GroupReso
 	watchCacheSizes[schema.GroupResource{Resource: "pods"}] = maxInt(50*clusterSize, 1000)
 	watchCacheSizes[schema.GroupResource{Resource: "services"}] = maxInt(5*clusterSize, 1000)
 	watchCacheSizes[schema.GroupResource{Resource: "events"}] = 0
+	watchCacheSizes[schema.GroupResource{Resource: "events", Group: "events.k8s.io"}] = 0
 	watchCacheSizes[schema.GroupResource{Resource: "apiservices", Group: "apiregistration.k8s.io"}] = maxInt(5*clusterSize, 1000)
 	watchCacheSizes[schema.GroupResource{Resource: "leases", Group: "coordination.k8s.io"}] = maxInt(5*clusterSize, 1000)
 	return watchCacheSizes


@@ -67,6 +67,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],


@@ -294,7 +294,7 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C
 	}
 	// However, "empty" preFilterState is legit which tolerates every toSchedule Pod.
-	if len(s.TpPairToMatchNum) == 0 || len(s.Constraints) == 0 {
+	if len(s.Constraints) == 0 {
 		return nil
 	}
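An illustrative sketch (with hypothetical types, not the scheduler's framework types) of why the early return above now keys off Constraints only: a topology constraint can exist while no pods match it yet, and the filter must still run so maxSkew is enforced from the first placement onward.

package main

import "fmt"

type constraint struct {
	TopologyKey string
	MaxSkew     int
}

type preFilterState struct {
	Constraints      []constraint
	TpPairToMatchNum map[string]int // topology pair -> count of matching pods
}

func shouldSkipFilter(s preFilterState) bool {
	// Only a state with no constraints tolerates every pod; an empty match-count
	// map just means nothing has been scheduled against the constraint yet.
	return len(s.Constraints) == 0
}

func main() {
	s := preFilterState{
		Constraints:      []constraint{{TopologyKey: "zone", MaxSkew: 1}},
		TpPairToMatchNum: map[string]int{},
	}
	fmt.Println(shouldSkipFilter(s)) // false: the constraint still applies even with zero matches
}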


@@ -252,8 +252,10 @@ func (r *Resource) SetMaxResource(rl v1.ResourceList) {
 			r.MilliCPU = cpu
 		}
 	case v1.ResourceEphemeralStorage:
-		if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
-			r.EphemeralStorage = ephemeralStorage
+		if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+			if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
+				r.EphemeralStorage = ephemeralStorage
+			}
 		}
 	default:
 		if v1helper.IsScalarResourceName(rName) {
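A standalone sketch, assuming a plain boolean in place of the real feature-gate plumbing, of the guarded max-accumulation the hunk above adds: ephemeral-storage quantities only bump the running maximum when local storage capacity isolation is enabled.

package main

import "fmt"

type resource struct {
	MilliCPU         int64
	EphemeralStorage int64
}

func (r *resource) setMaxEphemeralStorage(value int64, localStorageCapacityIsolation bool) {
	if !localStorageCapacityIsolation {
		return // gate disabled: ephemeral storage does not affect the running maximum
	}
	if value > r.EphemeralStorage {
		r.EphemeralStorage = value
	}
}

func main() {
	r := &resource{}
	r.setMaxEphemeralStorage(10_000_000_000, false)
	fmt.Println(r.EphemeralStorage) // 0: gated off
	r.setMaxEphemeralStorage(10_000_000_000, true)
	fmt.Println(r.EphemeralStorage) // 10000000000
}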


@@ -284,8 +284,17 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
 }
 // UnmountDevice unmounts the volume on the node
-func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
-	err := mount.CleanupMountPoint(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()), false)
+func (d *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
+	if runtime.GOOS == "windows" {
+		// Flush data cache for windows because it does not do so automatically during unmount device
+		exec := d.plugin.host.GetExec(d.plugin.GetPluginName())
+		err := util.WriteVolumeCache(deviceMountPath, exec)
+		if err != nil {
+			return err
+		}
+	}
+	err := mount.CleanupMountPoint(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()), false)
 	if err == nil {
 		klog.V(2).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
 	} else {
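A simplified sketch of the control flow added above; flushCache and unmount are invented stand-ins, not the kubelet volume util API. On Windows the data cache is flushed explicitly before the mount point is cleaned up, because unmounting alone does not flush it there.

package main

import (
	"fmt"
	"runtime"
)

func flushCache(deviceMountPath string) error {
	fmt.Println("flushing write cache for", deviceMountPath)
	return nil
}

func unmount(deviceMountPath string) error {
	fmt.Println("cleaning up mount point", deviceMountPath)
	return nil
}

func unmountDevice(deviceMountPath string) error {
	if runtime.GOOS == "windows" {
		// Windows does not flush the data cache during unmount, so do it first.
		if err := flushCache(deviceMountPath); err != nil {
			return err
		}
	}
	return unmount(deviceMountPath)
}

func main() {
	if err := unmountDevice(`C:\var\lib\kubelet\plugins\azure-disk\mounts\m123`); err != nil {
		fmt.Println("unmount failed:", err)
	}
}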


@@ -20,6 +20,7 @@ package azure
 import (
 	"net/http"
+	"regexp"
 	"strings"
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
@@ -42,6 +43,12 @@ const (
 	// operationCancledErrorMessage means the operation is canceled by another new operation.
 	operationCancledErrorMessage = "canceledandsupersededduetoanotheroperation"
+	referencedResourceNotProvisionedMessageCode = "ReferencedResourceNotProvisioned"
+)
+
+var (
+	pipErrorMessageRE = regexp.MustCompile(`(?:.*)/subscriptions/(?:.*)/resourceGroups/(.*)/providers/Microsoft.Network/publicIPAddresses/([^\s]+)(?:.*)`)
 )
 // RequestBackoff if backoff is disabled in cloud provider it
@@ -193,12 +200,39 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer)
 		klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", *lb.Name)
 		az.lbCache.Delete(*lb.Name)
 	}
+	retryErrorMessage := rerr.Error().Error()
 	// Invalidate the cache because another new operation has canceled the current request.
 	if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCancledErrorMessage) {
 		klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", *lb.Name)
 		az.lbCache.Delete(*lb.Name)
 	}
+	// The LB update may fail because the referenced PIP is not in the Succeeded provisioning state
+	if strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(referencedResourceNotProvisionedMessageCode)) {
+		matches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage)
+		if len(matches) != 3 {
+			klog.Warningf("Failed to parse the retry error message %s", retryErrorMessage)
+			return rerr.Error()
+		}
+		pipRG, pipName := matches[1], matches[2]
+		klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, to.String(lb.Name))
+		pip, _, err := az.getPublicIPAddress(pipRG, pipName)
+		if err != nil {
+			klog.Warningf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err)
+			return rerr.Error()
+		}
+		// Perform a dummy update to fix the provisioning state
+		err = az.CreateOrUpdatePIP(service, pipRG, pip)
+		if err != nil {
+			klog.Warningf("Failed to update the public IP %s in resource group %s: %v", pipName, pipRG, err)
+			return rerr.Error()
+		}
+		// Invalidate the LB cache, return the error, and the controller manager
+		// would retry the LB update in the next reconcile loop
+		az.lbCache.Delete(*lb.Name)
+	}
 	return rerr.Error()
 }
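A runnable sketch of the error-message parsing used above: the same regular expression pulls the resource group and public IP name out of a "ReferencedResourceNotProvisioned" error so a dummy update of that PIP can be issued. The sample error message and subscription/resource names are invented for illustration.

package main

import (
	"fmt"
	"regexp"
)

var pipErrorMessageRE = regexp.MustCompile(`(?:.*)/subscriptions/(?:.*)/resourceGroups/(.*)/providers/Microsoft.Network/publicIPAddresses/([^\s]+)(?:.*)`)

func main() {
	msg := "Resource /subscriptions/sub-id/resourceGroups/my-rg/providers/Microsoft.Network/publicIPAddresses/my-pip referenced by the load balancer is not in Succeeded state (ReferencedResourceNotProvisioned)"
	matches := pipErrorMessageRE.FindStringSubmatch(msg)
	if len(matches) != 3 {
		fmt.Println("could not parse retry error message")
		return
	}
	pipRG, pipName := matches[1], matches[2]
	// At this point the cloud provider would issue a dummy CreateOrUpdate on the PIP
	// and invalidate the LB cache so the next reconcile retries the LB update.
	fmt.Printf("would re-issue CreateOrUpdate for PIP %q in resource group %q\n", pipName, pipRG)
}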


@@ -44,6 +44,9 @@ const (
 	maxStorageAccounts = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
 	maxDisksPerStorageAccounts = 60
 	storageAccountUtilizationBeforeGrowing = 0.5
+	// Disk Caching is not supported for disks 4 TiB and larger
+	// https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching
+	diskCachingLimit = 4096 // GiB
 	maxLUN = 64 // max number of LUNs per VM
 	errLeaseFailed = "AcquireDiskLeaseFailed"
@@ -156,10 +159,21 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 		return -1, danglingErr
 	}
-	if disk.DiskProperties != nil && disk.DiskProperties.Encryption != nil &&
-		disk.DiskProperties.Encryption.DiskEncryptionSetID != nil {
-		diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID
+	if disk.DiskProperties != nil {
+		if disk.DiskProperties.DiskSizeGB != nil && *disk.DiskProperties.DiskSizeGB >= diskCachingLimit && cachingMode != compute.CachingTypesNone {
+			// Disk Caching is not supported for disks 4 TiB and larger
+			// https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching
+			cachingMode = compute.CachingTypesNone
+			klog.Warningf("size of disk(%s) is %dGB which is bigger than limit(%dGB), set cacheMode as None",
+				diskURI, *disk.DiskProperties.DiskSizeGB, diskCachingLimit)
+		}
+		if disk.DiskProperties.Encryption != nil &&
+			disk.DiskProperties.Encryption.DiskEncryptionSetID != nil {
+			diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID
+		}
 	}
 	if v, ok := disk.Tags[WriteAcceleratorEnabled]; ok {
 		if v != nil && strings.EqualFold(*v, "true") {
 			writeAcceleratorEnabled = true
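A small sketch, with invented inputs and a string in place of the compute SDK's caching type, of the size check added above: Azure does not support host caching on disks of 4 TiB or larger, so the requested caching mode is forced to None (with a warning) once the disk size reaches the 4096 GiB limit.

package main

import "fmt"

const diskCachingLimitGiB = 4096 // disks at or above this size cannot use host caching

func effectiveCachingMode(requested string, diskSizeGiB int32) string {
	if diskSizeGiB >= diskCachingLimitGiB && requested != "None" {
		fmt.Printf("size of disk is %dGiB which is >= limit (%dGiB), setting cachingMode to None\n",
			diskSizeGiB, diskCachingLimitGiB)
		return "None"
	}
	return requested
}

func main() {
	fmt.Println(effectiveCachingMode("ReadOnly", 1024)) // ReadOnly: under the limit
	fmt.Println(effectiveCachingMode("ReadOnly", 8192)) // None: forced off for large disks
}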


@@ -433,7 +433,11 @@ func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) {
 		return nil, err
 	}
 	if len(foundInstances) != len(names) {
-		return nil, cloudprovider.InstanceNotFound
+		if len(foundInstances) == 0 {
+			// return error so the TargetPool nodecount does not drop to 0 unexpectedly.
+			return nil, cloudprovider.InstanceNotFound
+		}
+		klog.Warningf("getFoundInstanceByNames - input instances %d, found %d. Continuing LoadBalancer Update", len(names), len(foundInstances))
 	}
 	return foundInstances, nil
 }
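A toy sketch of the new behaviour above; lookupInstances is a stand-in for the GCE API calls. Only a completely empty lookup is treated as InstanceNotFound, while a partial result is logged and the load-balancer update proceeds with the instances that were found.

package main

import (
	"errors"
	"fmt"
)

var errInstanceNotFound = errors.New("instance not found")

func lookupInstances(names []string) []string {
	// Pretend the second node was deleted from the cloud but not yet from the cluster.
	found := []string{}
	for _, n := range names {
		if n != "node-2" {
			found = append(found, n)
		}
	}
	return found
}

func getInstancesByNames(names []string) ([]string, error) {
	found := lookupInstances(names)
	if len(found) != len(names) {
		if len(found) == 0 {
			// Returning an error keeps the TargetPool node count from dropping to 0 unexpectedly.
			return nil, errInstanceNotFound
		}
		fmt.Printf("input instances %d, found %d. Continuing LoadBalancer update\n", len(names), len(found))
	}
	return found, nil
}

func main() {
	instances, err := getInstancesByNames([]string{"node-1", "node-2", "node-3"})
	fmt.Println(instances, err)
}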

vendor/modules.txt

@@ -1141,7 +1141,7 @@ gopkg.in/square/go-jose.v2/jwt
 gopkg.in/warnings.v0
 # gopkg.in/yaml.v2 v2.2.8
 gopkg.in/yaml.v2
-# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1
+# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.12-k3s1
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
 k8s.io/api/admissionregistration/v1
@@ -1185,7 +1185,7 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1
+# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.12-k3s1
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@@ -1225,7 +1225,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
 k8s.io/apiextensions-apiserver/pkg/registry/customresource
 k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
 k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
-# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1
+# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.12-k3s1
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta
@@ -1287,7 +1287,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1
+# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.12-k3s1
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
 k8s.io/apiserver/pkg/admission/initializer
@@ -1417,7 +1417,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
 k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1
+# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.12-k3s1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/kustomize
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@@ -1430,7 +1430,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.12-k3s1
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/cached
 k8s.io/client-go/discovery/cached/disk
@@ -1664,7 +1664,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1
+# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.12-k3s1
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
 k8s.io/cloud-provider/node/helpers
@@ -1672,13 +1672,13 @@ k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.12-k3s1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1
+# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.12-k3s1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -1693,7 +1693,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1
+# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.12-k3s1
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
 k8s.io/component-base/codec
@@ -1711,10 +1711,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
 k8s.io/component-base/metrics/testutil
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1
+# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.12-k3s1
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.12-k3s1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@@ -1729,7 +1729,7 @@ k8s.io/gengo/types
 k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1
+# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.12-k3s1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -1757,7 +1757,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.12-k3s1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
 k8s.io/kube-openapi/pkg/aggregator
@@ -1768,14 +1768,14 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1
+# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.12-k3s1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1
+# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.12-k3s1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1alpha1
 k8s.io/kube-scheduler/config/v1alpha2
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1
+# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.12-k3s1
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
 k8s.io/kubectl/pkg/cmd/annotate
@@ -1850,11 +1850,11 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1
+# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.12-k3s1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
 k8s.io/kubelet/pkg/apis/pluginregistration/v1
-# k8s.io/kubernetes v1.18.10 => github.com/rancher/kubernetes v1.18.10-k3s1
+# k8s.io/kubernetes v1.18.12 => github.com/rancher/kubernetes v1.18.12-k3s1
 k8s.io/kubernetes/cmd/cloud-controller-manager/app
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@@ -2597,7 +2597,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
 k8s.io/kubernetes/third_party/forked/ipvs
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.12-k3s1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -2628,7 +2628,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1
+# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.12-k3s1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2