mirror of https://github.com/k3s-io/k3s
Update Kubernetes to v1.18.10-k3s1
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
Refs: pull/2443/head, tag v1.18.10+k3s1
parent 3661f56604
commit 6fa9730656
go.mod (52 changes)

@@ -33,31 +33,31 @@ replace (
 	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.9-k3s1
+	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1
-	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.9-k3s1
+	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1
-	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.9-k3s1
+	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1
-	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.9-k3s1
+	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1
-	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.9-k3s1
+	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1
-	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.9-k3s1
+	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1
-	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.9-k3s1
+	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1
-	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.9-k3s1
+	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1
-	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.9-k3s1
+	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1
-	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.9-k3s1
+	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1
-	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.9-k3s1
+	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1
-	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.9-k3s1
+	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1
-	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.9-k3s1
+	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1
-	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.9-k3s1
+	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1
-	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.9-k3s1
+	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1
-	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.9-k3s1
+	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1
-	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.9-k3s1
+	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1
-	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.9-k3s1
+	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1
-	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.9-k3s1
+	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.10-k3s1
-	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.9-k3s1
+	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1
-	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.9-k3s1
+	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1
-	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.9-k3s1
+	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.10-k3s1
-	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.9-k3s1
+	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.10-k3s1
-	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.9-k3s1
+	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.10-k3s1
-	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.9-k3s1
+	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.10-k3s1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 )

@@ -123,5 +123,5 @@ require (
 	k8s.io/component-base v0.0.0
 	k8s.io/cri-api v0.0.0
 	k8s.io/klog v1.0.0
-	k8s.io/kubernetes v1.18.0
+	k8s.io/kubernetes v1.18.10
 )
go.sum (86 changes)

@@ -637,49 +637,49 @@ github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd h1:KPnQ
 github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd/go.mod h1:QYmg8cqWPPfIbpEuhtJbEdWwA6PEKSY016Z6EdfL9+8=
 github.com/rancher/kine v0.4.0 h1:1IhWy3TzjExG8xnj46eyUEWdzqNAD1WrgL4eEBKm6Uc=
 github.com/rancher/kine v0.4.0/go.mod h1:IImtCJ68AIkE+VY/kUI0NkyJL5q5WzO8QvMsSXqbrpA=
-github.com/rancher/kubernetes v1.18.9-k3s1 h1:LLJLc7p+Xmt3KeF7jkLz2MuS0+0KRE9fTDprUX4Y9RA=
+github.com/rancher/kubernetes v1.18.10-k3s1 h1:RS7DmQf0U/iNhGO0MIvfmzycCowMSvUQhwZNVdydWm8=
-github.com/rancher/kubernetes v1.18.9-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
+github.com/rancher/kubernetes v1.18.10-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.9-k3s1 h1:qTAC4DGioKxoy3b0JFTWjw3ShGcKqjNTUh2AKfvkMBU=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1 h1:tZfjkh+JjJ3omi19P8caLdN9ql5Ftpk0tWAmJh4Bo5E=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.9-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.9-k3s1 h1:u3lZHqe48csUFUZycU4W8hyvgKt3PJZqeALxm9t5PKA=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1 h1:Isl/Gi7wCyZ31Hpkw+o3SAYx+SP9VcL5Udbwndq5MfE=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.9-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.9-k3s1 h1:/EItzAufFgB0GbygS2dotV2HY30U8HoWu3c7QSw9P9M=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1 h1:zXR4Em0Og8HoZU14b6gMb74n9JUMYd2pzzN8tFvHniw=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.9-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.9-k3s1 h1:ipSuqeFd8lmKFyZk5VabMOOgViNrItz61J9QZS6DNpY=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1 h1:v6Yeydb3yeJB6+MQVGGjIOQcYWkHpfr1WdNy7N+9kdg=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.9-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.9-k3s1 h1:BL+V4zSgs77zGy0f1XqnKXs3lJW0pBw9zR9pT6bQtMA=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1 h1:OU2/souymetniKHDb8S6RmrXVsBV/WJuY9spVQBb+Dc=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.9-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.9-k3s1 h1:dA86m3H1M/oFV9VICMMxNStfVeQaZIuFxg7GAVEkNqI=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1 h1:4/FsynISjNWhC+c0JVFawCnKtM2Q4jzvP6xaVIG1u6o=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.9-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.9-k3s1 h1:t728oClyjfhX0VI9o3J8X7POJiogVDZK5rLPVIGnd8g=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1 h1:+GhEM4DXe0BMWEcss49rNkFLHp032Ybt/FfIjtjiAqY=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.9-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.9-k3s1 h1:SzGana3eKbrMGFaV4FKFZIoIz2t8sVaczZoCCXMN1OU=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1 h1:/GzhswAi+6qxukCTwLlPKLPup2xcZ1ZxM0RI525do/o=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.9-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.9-k3s1 h1:QOCk40d0s4/IQvUnYXu5hdGsPkhdnGiS6YxpMikUKJM=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1 h1:OjdqC+CK/kcK0Lq3LX2MCBSxzXc0yDNS9UqwDoRLgxU=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.9-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.9-k3s1 h1:MNmBot3Rj6QDVxigJvcxXWOPKCm5NM8ACEDk1nvXT/4=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1 h1:5XsuCxrfl1s08MRm+4i98l4fsCW9KbAFdGXV+x3Anqw=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.9-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.9-k3s1 h1:KRlY1Hljsh/qBbB/DASEBdYMPxFRNkMpOltpIURjMTI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1 h1:dFGSPzS/pc3yjJ50dafrybu9tzjuWXX/qxngAWgQT5g=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.9-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.9-k3s1 h1:nkGWt+by8lBBmOeytS81Xt4vnn9OMA1DftLKtObbxdE=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1 h1:siigEKE3XiGkwmD0fVWE26l+xiFrCZv+xfvTttheHyE=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.9-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.9-k3s1 h1:A7Elvwo8Cy14hhKAjDuvkaE1xFLqJehqdLQVkM6iBwM=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1 h1:uPfAAj4yin/8X3j63jxyLqR8qpM7Zo6yD3zfy4leifI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.9-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.9-k3s1 h1:+S7ag7Rqe5KCzY+i5rN45ckwBIAc/h9wmj2ol0NCdjU=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1 h1:DIA6KBLS1vRYYT9S3XZXPn8Q82g+AvYE9LJcb9rwRfM=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.9-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.9-k3s1 h1:jwnEH/KEl67g1bb+kOAW+UlA5pSqf0h969fi88y4U2E=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1 h1:UBMPWPH3k09FyAKPMBrS5FG9j6e7CAZrouSOFgQLK2A=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.9-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.9-k3s1 h1:0Ai5nstkLanNLfrix1pFsVfvc8NLvxCEDwS5Qsf5Dic=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1 h1:OdBE36l4S/XNpxI6OgFkVPZGuAjB1ePecuCIpSDkA0o=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.9-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.9-k3s1 h1:qYbKao9YdKDNZyzbQeo+uIuBCGviy3PbkVSE6j0zAjk=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1 h1:aTvmtU9ML9ME/hk1xph6MSpa7hwR4w2I1wkWcYWPqj4=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.9-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.9-k3s1 h1:ebsW5Uu/XIzjnO9P1okUvj1IGmspfmNaUpynfHupUPE=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1 h1:+uo+XJdwIT74zIHV7Rz7r4BtnZU0XO1u7C0N5xIEJrs=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.9-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.9-k3s1 h1:PmGk7TPAqdd/ZB3BhBbUPUxqgOiXRFlyjP54l3bnWu8=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1 h1:S0KK6mxYic3gfzLR/EE7+BByZ64dOVo7j0PizDehcIY=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.9-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.9-k3s1 h1:yeuKOUN7YSyZ5uEPN5lZztLKuF5BLSQC37hAQGxa+KA=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1 h1:LtO8DDrNAzK113XtUrZEvFTfZ4WxaGULIVtOkxVuAJQ=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.9-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
-github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.9-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
+github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.10-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
 github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
 github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
 github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=
@@ -440,6 +440,14 @@ func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Obje

 func (s *store) Count(key string) (int64, error) {
     key = path.Join(s.pathPrefix, key)
+
+    // We need to make sure the key ended with "/" so that we only get children "directories".
+    // e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
+    // while with prefix "/a/" will return only "/a/b" which is the correct answer.
+    if !strings.HasSuffix(key, "/") {
+        key += "/"
+    }
+
     startTime := time.Now()
     getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
     metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
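As an aside (not part of the commit), here is a minimal sketch of why the added trailing slash matters, assuming the etcd client package go.etcd.io/etcd/clientv3 that this storage code uses; GetPrefixRangeEnd simply increments the last byte of the prefix.

package main

import (
    "fmt"

    "go.etcd.io/etcd/clientv3"
)

func main() {
    // Without the slash, the count range ["/a", end) also matches sibling keys such as "/ab".
    fmt.Println(clientv3.GetPrefixRangeEnd("/a")) // "/b"  -> "/a", "/a/b" and "/ab" all fall in range
    // With the slash, only children of "/a" fall inside the range.
    fmt.Println(clientv3.GetPrefixRangeEnd("/a/")) // "/a0" -> only "/a/b" falls in range
}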
@@ -3,8 +3,8 @@ package version
 var (
     gitMajor = "1"
     gitMinor = "18"
-    gitVersion = "v1.18.9-k3s1"
+    gitVersion = "v1.18.10-k3s1"
-    gitCommit = "f1d9dca4e9681e74faee7359af3cc2df01a4b6d6"
+    gitCommit = "c8d808cfc3c2c00c8d542b84e368ca439987e352"
     gitTreeState = "clean"
-    buildDate = "2020-09-16T22:20:24Z"
+    buildDate = "2020-10-15T18:25:57Z"
 )

@@ -3,8 +3,8 @@ package version
 var (
     gitMajor = "1"
     gitMinor = "18"
-    gitVersion = "v1.18.9-k3s1"
+    gitVersion = "v1.18.10-k3s1"
-    gitCommit = "f1d9dca4e9681e74faee7359af3cc2df01a4b6d6"
+    gitCommit = "c8d808cfc3c2c00c8d542b84e368ca439987e352"
     gitTreeState = "clean"
-    buildDate = "2020-09-16T22:20:24Z"
+    buildDate = "2020-10-15T18:25:57Z"
 )

@@ -206,6 +206,13 @@ func isLess(i, j reflect.Value) (bool, error) {
         return true, nil
     case reflect.Interface:
+        if i.IsNil() && j.IsNil() {
+            return false, nil
+        } else if i.IsNil() {
+            return true, nil
+        } else if j.IsNil() {
+            return false, nil
+        }
         switch itype := i.Interface().(type) {
         case uint8:
             if jtype, ok := j.Interface().(uint8); ok {
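For context (not part of the commit), a small standalone sketch of the nil-ordering rule the hunk above introduces for reflect.Interface values — nil sorts before non-nil, and two nils compare equal; the helper name lessIfNil is illustrative only.

package main

import (
    "fmt"
    "reflect"
)

// lessIfNil mirrors the added guard: it decides the comparison when either
// interface value is nil, and reports whether it decided anything.
func lessIfNil(i, j reflect.Value) (less, decided bool) {
    switch {
    case i.IsNil() && j.IsNil():
        return false, true
    case i.IsNil():
        return true, true
    case j.IsNil():
        return false, true
    }
    return false, false // both non-nil: fall through to the type-specific switch
}

func main() {
    var a interface{}     // nil interface value
    var b interface{} = 7 // non-nil interface value
    vi := reflect.ValueOf(&a).Elem()
    vj := reflect.ValueOf(&b).Elem()
    fmt.Println(lessIfNil(vi, vj)) // true true: nil sorts first
}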
@@ -1,42 +0,0 @@
-package(default_visibility = ["//visibility:public"])
-
-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-    "go_test",
-)
-
-go_library(
-    name = "go_default_library",
-    srcs = ["util.go"],
-    importpath = "k8s.io/kubernetes/pkg/api/endpoints",
-    deps = [
-        "//pkg/apis/core:go_default_library",
-        "//pkg/util/hash:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
-    ],
-)
-
-go_test(
-    name = "go_default_test",
-    srcs = ["util_test.go"],
-    embed = [":go_default_library"],
-    deps = [
-        "//pkg/apis/core:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
-    ],
-)
-
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [":package-srcs"],
-    tags = ["automanaged"],
-)

@@ -1,10 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-- thockin
-- smarterclayton
-- mikedanese
-- sttts
-- eparis
-- resouer
-- david-mcmahon

@@ -1,235 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package endpoints
-
-import (
-    "bytes"
-    "crypto/md5"
-    "encoding/hex"
-    "hash"
-    "sort"
-
-    "k8s.io/apimachinery/pkg/types"
-    api "k8s.io/kubernetes/pkg/apis/core"
-    hashutil "k8s.io/kubernetes/pkg/util/hash"
-)
-
-// RepackSubsets takes a slice of EndpointSubset objects, expands it to the full
-// representation, and then repacks that into the canonical layout. This
-// ensures that code which operates on these objects can rely on the common
-// form for things like comparison. The result is a newly allocated slice.
-func RepackSubsets(subsets []api.EndpointSubset) []api.EndpointSubset {
-    // First map each unique port definition to the sets of hosts that
-    // offer it.
-    allAddrs := map[addressKey]*api.EndpointAddress{}
-    portToAddrReadyMap := map[api.EndpointPort]addressSet{}
-    for i := range subsets {
-        if len(subsets[i].Ports) == 0 {
-            // Don't discard endpoints with no ports defined, use a sentinel.
-            mapAddressesByPort(&subsets[i], api.EndpointPort{Port: -1}, allAddrs, portToAddrReadyMap)
-        } else {
-            for _, port := range subsets[i].Ports {
-                mapAddressesByPort(&subsets[i], port, allAddrs, portToAddrReadyMap)
-            }
-        }
-    }
-
-    // Next, map the sets of hosts to the sets of ports they offer.
-    // Go does not allow maps or slices as keys to maps, so we have
-    // to synthesize an artificial key and do a sort of 2-part
-    // associative entity.
-    type keyString string
-    keyToAddrReadyMap := map[keyString]addressSet{}
-    addrReadyMapKeyToPorts := map[keyString][]api.EndpointPort{}
-    for port, addrs := range portToAddrReadyMap {
-        key := keyString(hashAddresses(addrs))
-        keyToAddrReadyMap[key] = addrs
-        if port.Port > 0 { // avoid sentinels
-            addrReadyMapKeyToPorts[key] = append(addrReadyMapKeyToPorts[key], port)
-        } else {
-            if _, found := addrReadyMapKeyToPorts[key]; !found {
-                // Force it to be present in the map
-                addrReadyMapKeyToPorts[key] = nil
-            }
-        }
-    }
-
-    // Next, build the N-to-M association the API wants.
-    final := []api.EndpointSubset{}
-    for key, ports := range addrReadyMapKeyToPorts {
-        var readyAddrs, notReadyAddrs []api.EndpointAddress
-        for addr, ready := range keyToAddrReadyMap[key] {
-            if ready {
-                readyAddrs = append(readyAddrs, *addr)
-            } else {
-                notReadyAddrs = append(notReadyAddrs, *addr)
-            }
-        }
-        final = append(final, api.EndpointSubset{Addresses: readyAddrs, NotReadyAddresses: notReadyAddrs, Ports: ports})
-    }
-
-    // Finally, sort it.
-    return SortSubsets(final)
-}
-
-// The sets of hosts must be de-duped, using IP+UID as the key.
-type addressKey struct {
-    ip  string
-    uid types.UID
-}
-
-// mapAddressesByPort adds all ready and not-ready addresses into a map by a single port.
-func mapAddressesByPort(subset *api.EndpointSubset, port api.EndpointPort, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) {
-    for k := range subset.Addresses {
-        mapAddressByPort(&subset.Addresses[k], port, true, allAddrs, portToAddrReadyMap)
-    }
-    for k := range subset.NotReadyAddresses {
-        mapAddressByPort(&subset.NotReadyAddresses[k], port, false, allAddrs, portToAddrReadyMap)
-    }
-}
-
-// mapAddressByPort adds one address into a map by port, registering the address with a unique pointer, and preserving
-// any existing ready state.
-func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress {
-    // use addressKey to distinguish between two endpoints that are identical addresses
-    // but may have come from different hosts, for attribution.
-    key := addressKey{ip: addr.IP}
-    if addr.TargetRef != nil {
-        key.uid = addr.TargetRef.UID
-    }
-
-    // Accumulate the address. The full EndpointAddress structure is preserved for use when
-    // we rebuild the subsets so that the final TargetRef has all of the necessary data.
-    existingAddress := allAddrs[key]
-    if existingAddress == nil {
-        // Make a copy so we don't write to the
-        // input args of this function.
-        existingAddress = &api.EndpointAddress{}
-        *existingAddress = *addr
-        allAddrs[key] = existingAddress
-    }
-
-    // Remember that this port maps to this address.
-    if _, found := portToAddrReadyMap[port]; !found {
-        portToAddrReadyMap[port] = addressSet{}
-    }
-    // if we have not yet recorded this port for this address, or if the previous
-    // state was ready, write the current ready state. not ready always trumps
-    // ready.
-    if wasReady, found := portToAddrReadyMap[port][existingAddress]; !found || wasReady {
-        portToAddrReadyMap[port][existingAddress] = ready
-    }
-    return existingAddress
-}
-
-type addressSet map[*api.EndpointAddress]bool
-
-type addrReady struct {
-    addr  *api.EndpointAddress
-    ready bool
-}
-
-func hashAddresses(addrs addressSet) string {
-    // Flatten the list of addresses into a string so it can be used as a
-    // map key. Unfortunately, DeepHashObject is implemented in terms of
-    // spew, and spew does not handle non-primitive map keys well. So
-    // first we collapse it into a slice, sort the slice, then hash that.
-    slice := make([]addrReady, 0, len(addrs))
-    for k, ready := range addrs {
-        slice = append(slice, addrReady{k, ready})
-    }
-    sort.Sort(addrsReady(slice))
-    hasher := md5.New()
-    hashutil.DeepHashObject(hasher, slice)
-    return hex.EncodeToString(hasher.Sum(nil)[0:])
-}
-
-func lessAddrReady(a, b addrReady) bool {
-    // ready is not significant to hashing since we can't have duplicate addresses
-    return LessEndpointAddress(a.addr, b.addr)
-}
-
-type addrsReady []addrReady
-
-func (sl addrsReady) Len() int      { return len(sl) }
-func (sl addrsReady) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
-func (sl addrsReady) Less(i, j int) bool {
-    return lessAddrReady(sl[i], sl[j])
-}
-
-// LessEndpointAddress compares IP addresses lexicographically and returns true if first argument is lesser than second
-func LessEndpointAddress(a, b *api.EndpointAddress) bool {
-    ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP))
-    if ipComparison != 0 {
-        return ipComparison < 0
-    }
-    if b.TargetRef == nil {
-        return false
-    }
-    if a.TargetRef == nil {
-        return true
-    }
-    return a.TargetRef.UID < b.TargetRef.UID
-}
-
-// SortSubsets sorts an array of EndpointSubset objects in place. For ease of
-// use it returns the input slice.
-func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset {
-    for i := range subsets {
-        ss := &subsets[i]
-        sort.Sort(addrsByIPAndUID(ss.Addresses))
-        sort.Sort(addrsByIPAndUID(ss.NotReadyAddresses))
-        sort.Sort(portsByHash(ss.Ports))
-    }
-    sort.Sort(subsetsByHash(subsets))
-    return subsets
-}
-
-func hashObject(hasher hash.Hash, obj interface{}) []byte {
-    hashutil.DeepHashObject(hasher, obj)
-    return hasher.Sum(nil)
-}
-
-type subsetsByHash []api.EndpointSubset
-
-func (sl subsetsByHash) Len() int      { return len(sl) }
-func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
-func (sl subsetsByHash) Less(i, j int) bool {
-    hasher := md5.New()
-    h1 := hashObject(hasher, sl[i])
-    h2 := hashObject(hasher, sl[j])
-    return bytes.Compare(h1, h2) < 0
-}
-
-type addrsByIPAndUID []api.EndpointAddress
-
-func (sl addrsByIPAndUID) Len() int      { return len(sl) }
-func (sl addrsByIPAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
-func (sl addrsByIPAndUID) Less(i, j int) bool {
-    return LessEndpointAddress(&sl[i], &sl[j])
-}
-
-type portsByHash []api.EndpointPort
-
-func (sl portsByHash) Len() int      { return len(sl) }
-func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
-func (sl portsByHash) Less(i, j int) bool {
-    hasher := md5.New()
-    h1 := hashObject(hasher, sl[i])
-    h2 := hashObject(hasher, sl[j])
-    return bytes.Compare(h1, h2) < 0
-}
@@ -42,7 +42,8 @@ func BindFlags(l *componentbaseconfig.LeaderElectionConfiguration, fs *pflag.Fla
         "of a leadership. This is only applicable if leader election is enabled.")
     fs.StringVar(&l.ResourceLock, "leader-elect-resource-lock", l.ResourceLock, ""+
         "The type of resource object that is used for locking during "+
-        "leader election. Supported options are `endpoints` (default) and `configmaps`.")
+        "leader election. Supported options are 'endpoints', 'configmaps', "+
+        "'leases', 'endpointsleases' and 'configmapsleases'.")
     fs.StringVar(&l.ResourceName, "leader-elect-resource-name", l.ResourceName, ""+
         "The name of resource object that is used for locking during "+
        "leader election.")
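As an aside (not part of the commit), the lock types named in the updated help text correspond to client-go constants; the sketch below assumes they live in k8s.io/client-go/tools/leaderelection/resourcelock at this Kubernetes level.

package main

import (
    "fmt"

    "k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
    locks := []string{
        resourcelock.EndpointsResourceLock,        // "endpoints"
        resourcelock.ConfigMapsResourceLock,       // "configmaps"
        resourcelock.LeasesResourceLock,           // "leases"
        resourcelock.EndpointsLeasesResourceLock,  // "endpointsleases"
        resourcelock.ConfigMapsLeasesResourceLock, // "configmapsleases"
    }
    for _, l := range locks {
        fmt.Println(l) // accepted values for --leader-elect-resource-lock
    }
}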
@@ -455,9 +455,18 @@ func (e *EndpointController) syncService(key string) error {
     createEndpoints := len(currentEndpoints.ResourceVersion) == 0

+    // Compare the sorted subsets and labels
+    // Remove the HeadlessService label from the endpoints if it exists,
+    // as this won't be set on the service itself
+    // and will cause a false negative in this diff check.
+    // But first check if it has that label to avoid expensive copies.
+    compareLabels := currentEndpoints.Labels
+    if _, ok := currentEndpoints.Labels[v1.IsHeadlessService]; ok {
+        compareLabels = utillabels.CloneAndRemoveLabel(currentEndpoints.Labels, v1.IsHeadlessService)
+    }
     if !createEndpoints &&
         apiequality.Semantic.DeepEqual(currentEndpoints.Subsets, subsets) &&
-        apiequality.Semantic.DeepEqual(currentEndpoints.Labels, service.Labels) {
+        apiequality.Semantic.DeepEqual(compareLabels, service.Labels) {
         klog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
         return nil
     }
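For context (not part of the commit), a self-contained sketch of the comparison trick used above: copy the labels, drop the one key the other object never carries, then compare, so a marker label cannot force an unnecessary update. The helper cloneAndRemoveLabel is a stand-in for the utillabels.CloneAndRemoveLabel call in the hunk, and the label key string is assumed.

package main

import (
    "fmt"
    "reflect"
)

// cloneAndRemoveLabel copies m without key, leaving the original map untouched.
func cloneAndRemoveLabel(m map[string]string, key string) map[string]string {
    out := make(map[string]string, len(m))
    for k, v := range m {
        if k != key {
            out[k] = v
        }
    }
    return out
}

func main() {
    endpointsLabels := map[string]string{"app": "db", "service.kubernetes.io/headless": ""}
    serviceLabels := map[string]string{"app": "db"}

    // Naive comparison reports a difference and would trigger an update every sync.
    fmt.Println(reflect.DeepEqual(endpointsLabels, serviceLabels)) // false

    // Comparing with the headless marker stripped recognises the objects as equal.
    compare := cloneAndRemoveLabel(endpointsLabels, "service.kubernetes.io/headless")
    fmt.Println(reflect.DeepEqual(compare, serviceLabels)) // true
}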
@@ -114,10 +114,14 @@ func ReadDockercfgFile(searchPaths []string) (cfg DockerConfig, err error) {
             continue
         }
         cfg, err := readDockerConfigFileFromBytes(contents)
-        if err == nil {
+        if err != nil {
-            klog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation)
+            klog.V(4).Infof("couldn't get the config from %q contents: %v", absDockerConfigFileLocation, err)
-            return cfg, nil
+            continue
         }
+
+        klog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation)
+        return cfg, nil
     }
     return nil, fmt.Errorf("couldn't find valid .dockercfg after checking in %v", searchPaths)
 }

@@ -224,8 +228,7 @@ func ReadDockerConfigFileFromUrl(url string, client *http.Client, header *http.H
 func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
     if err = json.Unmarshal(contents, &cfg); err != nil {
-        klog.Errorf("while trying to parse blob %q: %v", contents, err)
-        return nil, err
+        return nil, errors.New("error occurred while trying to unmarshal json")
     }
     return
 }

@@ -233,8 +236,7 @@ func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error
 func readDockerConfigJsonFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
     var cfgJson DockerConfigJson
     if err = json.Unmarshal(contents, &cfgJson); err != nil {
-        klog.Errorf("while trying to parse blob %q: %v", contents, err)
-        return nil, err
+        return nil, errors.New("error occurred while trying to unmarshal json")
     }
     cfg = cfgJson.Auths
     return
@@ -161,7 +161,10 @@ func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool {
         ect.items[namespacedName] = change
     }

-    if t := getLastChangeTriggerTime(endpoints.Annotations); !t.IsZero() {
+    // In case of Endpoints deletion, the LastChangeTriggerTime annotation is
+    // by-definition coming from the time of last update, which is not what
+    // we want to measure. So we simply ignore it in this cases.
+    if t := getLastChangeTriggerTime(endpoints.Annotations); !t.IsZero() && current != nil {
         ect.lastChangeTriggerTimes[namespacedName] = append(ect.lastChangeTriggerTimes[namespacedName], t)
     }

@@ -212,7 +215,12 @@ func (ect *EndpointChangeTracker) EndpointSliceUpdate(endpointSlice *discovery.E
     if changeNeeded {
         metrics.EndpointChangesPending.Inc()
-        if t := getLastChangeTriggerTime(endpointSlice.Annotations); !t.IsZero() {
+        // In case of Endpoints deletion, the LastChangeTriggerTime annotation is
+        // by-definition coming from the time of last update, which is not what
+        // we want to measure. So we simply ignore it in this cases.
+        // TODO(wojtek-t, robscott): Address the problem for EndpointSlice deletion
+        // when other EndpointSlice for that service still exist.
+        if t := getLastChangeTriggerTime(endpointSlice.Annotations); !t.IsZero() && !removeSlice {
             ect.lastChangeTriggerTimes[namespacedName] =
                 append(ect.lastChangeTriggerTimes[namespacedName], t)
         }
@@ -13,7 +13,6 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/registry/core/endpoint",
     deps = [
-        "//pkg/api/endpoints:go_default_library",
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/core/validation:go_default_library",

@@ -22,7 +22,6 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/validation/field"
     "k8s.io/apiserver/pkg/storage/names"
-    endptspkg "k8s.io/kubernetes/pkg/api/endpoints"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/apis/core/validation"

@@ -60,8 +59,6 @@ func (endpointsStrategy) Validate(ctx context.Context, obj runtime.Object) field

 // Canonicalize normalizes the object after validation.
 func (endpointsStrategy) Canonicalize(obj runtime.Object) {
-    endpoints := obj.(*api.Endpoints)
-    endpoints.Subsets = endptspkg.RepackSubsets(endpoints.Subsets)
 }

 // AllowCreateOnUpdate is true for endpoints.
@@ -229,13 +229,13 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*nodeinfo.Node
             klog.Error("node not found")
             return
         }
-        for _, existingPod := range nodeInfo.PodsWithAffinity() {
+        for _, existingPod := range nodeInfo.PodsWithRequiredAntiAffinity() {
             existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, node)
             if err != nil {
                 errCh.SendErrorWithCancel(err, cancel)
                 return
             }
-            if existingPodTopologyMaps != nil {
+            if len(existingPodTopologyMaps) != 0 {
                 appendResult(existingPodTopologyMaps)
             }
         }

@@ -334,7 +334,7 @@ func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework
     if allNodes, err = pl.sharedLister.NodeInfos().List(); err != nil {
         return framework.NewStatus(framework.Error, fmt.Sprintf("failed to list NodeInfos: %v", err))
     }
-    if havePodsWithAffinityNodes, err = pl.sharedLister.NodeInfos().HavePodsWithAffinityList(); err != nil {
+    if havePodsWithAffinityNodes, err = pl.sharedLister.NodeInfos().HavePodsWithRequiredAntiAffinityList(); err != nil {
         return framework.NewStatus(framework.Error, fmt.Sprintf("failed to list NodeInfos with pods with affinity: %v", err))
     }
@@ -196,6 +196,8 @@ func (cache *schedulerCache) Dump() *Dump {

 // UpdateSnapshot takes a snapshot of cached NodeInfo map. This is called at
 // beginning of every scheduling cycle.
+// The snapshot only includes Nodes that are not deleted at the time this function is called.
+// nodeinfo.Node() is guaranteed to be not nil for all the nodes in the snapshot.
 // This function tracks generation number of NodeInfo and updates only the
 // entries of an existing snapshot that have changed after the snapshot was taken.
 func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {

@@ -213,6 +215,10 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
     // status from having pods with affinity to NOT having pods with affinity or the other
     // way around.
     updateNodesHavePodsWithAffinity := false
+    // HavePodsWithRequiredAntiAffinityNodeInfoList must be re-created if a node changed its
+    // status from having pods with required anti-affinity to NOT having pods with required
+    // anti-affinity or the other way around.
+    updateNodesHavePodsWithRequiredAntiAffinity := false

     // Start from the head of the NodeInfo doubly linked list and update snapshot
     // of NodeInfos updated after the last snapshot.

@@ -239,6 +245,9 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
             if (len(existing.PodsWithAffinity()) > 0) != (len(clone.PodsWithAffinity()) > 0) {
                 updateNodesHavePodsWithAffinity = true
             }
+            if (len(existing.PodsWithRequiredAntiAffinity()) > 0) != (len(clone.PodsWithRequiredAntiAffinity()) > 0) {
+                updateNodesHavePodsWithRequiredAntiAffinity = true
+            }
             // We need to preserve the original pointer of the NodeInfo struct since it
             // is used in the NodeInfoList, which we may not update.
             *existing = *clone

@@ -249,12 +258,15 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
         nodeSnapshot.generation = cache.headNode.info.GetGeneration()
     }

-    if len(nodeSnapshot.nodeInfoMap) > len(cache.nodes) {
+    // Comparing to pods in nodeTree.
+    // Deleted nodes get removed from the tree, but they might remain in the nodes map
+    // if they still have non-deleted Pods.
+    if len(nodeSnapshot.nodeInfoMap) > cache.nodeTree.numNodes {
         cache.removeDeletedNodesFromSnapshot(nodeSnapshot)
         updateAllLists = true
     }

-    if updateAllLists || updateNodesHavePodsWithAffinity {
+    if updateAllLists || updateNodesHavePodsWithAffinity || updateNodesHavePodsWithRequiredAntiAffinity {
         cache.updateNodeInfoSnapshotList(nodeSnapshot, updateAllLists)
     }
@@ -276,6 +288,7 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {

 func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) {
     snapshot.havePodsWithAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
+    snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
     if updateAll {
         // Take a snapshot of the nodes order in the tree
         snapshot.nodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)

@@ -287,6 +300,9 @@ func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, upda
                 if len(n.PodsWithAffinity()) > 0 {
                     snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n)
                 }
+                if len(n.PodsWithRequiredAntiAffinity()) > 0 {
+                    snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = append(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList, n)
+                }
             } else {
                 klog.Errorf("node %q exist in nodeTree but not in NodeInfoMap, this should not happen.", nodeName)
             }

@@ -296,18 +312,21 @@ func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, upda
             if len(n.PodsWithAffinity()) > 0 {
                 snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n)
             }
+            if len(n.PodsWithRequiredAntiAffinity()) > 0 {
+                snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = append(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList, n)
+            }
         }
     }
 }

 // If certain nodes were deleted after the last snapshot was taken, we should remove them from the snapshot.
 func (cache *schedulerCache) removeDeletedNodesFromSnapshot(snapshot *Snapshot) {
-    toDelete := len(snapshot.nodeInfoMap) - len(cache.nodes)
+    toDelete := len(snapshot.nodeInfoMap) - cache.nodeTree.numNodes
     for name := range snapshot.nodeInfoMap {
         if toDelete <= 0 {
             break
         }
-        if _, ok := cache.nodes[name]; !ok {
+        if n, ok := cache.nodes[name]; !ok || n.info.Node() == nil {
             delete(snapshot.nodeInfoMap, name)
             toDelete--
         }
@@ -99,6 +99,8 @@ type Cache interface {
     // UpdateSnapshot updates the passed infoSnapshot to the current contents of Cache.
     // The node info contains aggregated information of pods scheduled (including assumed to be)
    // on this node.
+    // The snapshot only includes Nodes that are not deleted at the time this function is called.
+    // nodeinfo.Node() is guaranteed to be not nil for all the nodes in the snapshot.
     UpdateSnapshot(nodeSnapshot *Snapshot) error

     // Dump produces a dump of the current cache.
@@ -35,7 +35,10 @@ type Snapshot struct {
     nodeInfoList []*schedulernodeinfo.NodeInfo
     // havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms.
     havePodsWithAffinityNodeInfoList []*schedulernodeinfo.NodeInfo
-    generation int64
+    // havePodsWithRequiredAntiAffinityNodeInfoList is the list of nodes with at least one
+    // pod declaring required anti-affinity terms.
+    havePodsWithRequiredAntiAffinityNodeInfoList []*schedulernodeinfo.NodeInfo
+    generation int64
 }

 var _ schedulerlisters.SharedLister = &Snapshot{}

@@ -52,17 +55,22 @@ func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
     nodeInfoMap := createNodeInfoMap(pods, nodes)
     nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap))
     havePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap))
+    havePodsWithRequiredAntiAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap))
     for _, v := range nodeInfoMap {
         nodeInfoList = append(nodeInfoList, v)
         if len(v.PodsWithAffinity()) > 0 {
             havePodsWithAffinityNodeInfoList = append(havePodsWithAffinityNodeInfoList, v)
         }
+        if len(v.PodsWithRequiredAntiAffinity()) > 0 {
+            havePodsWithRequiredAntiAffinityNodeInfoList = append(havePodsWithRequiredAntiAffinityNodeInfoList, v)
+        }
     }

     s := NewEmptySnapshot()
     s.nodeInfoMap = nodeInfoMap
     s.nodeInfoList = nodeInfoList
     s.havePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList
+    s.havePodsWithRequiredAntiAffinityNodeInfoList = havePodsWithRequiredAntiAffinityNodeInfoList

     return s
 }
@@ -177,6 +185,11 @@ func (s *Snapshot) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, er
     return s.havePodsWithAffinityNodeInfoList, nil
 }

+// HavePodsWithRequiredAntiAffinityList returns the list of nodes with at least one pods with inter-pod affinity
+func (s *Snapshot) HavePodsWithRequiredAntiAffinityList() ([]*schedulernodeinfo.NodeInfo, error) {
+    return s.havePodsWithRequiredAntiAffinityNodeInfoList, nil
+}
+
 // Get returns the NodeInfo of the given node name.
 func (s *Snapshot) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
     if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil {
@@ -41,6 +41,8 @@ type NodeInfoLister interface {
 	List() ([]*schedulernodeinfo.NodeInfo, error)
 	// Returns the list of NodeInfos of nodes with pods with affinity terms.
 	HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error)
+	// Returns the list of NodeInfos of nodes with pods with required anti-affinity terms.
+	HavePodsWithRequiredAntiAffinityList() ([]*schedulernodeinfo.NodeInfo, error)
 	// Returns the NodeInfo of the given node name.
 	Get(nodeName string) (*schedulernodeinfo.NodeInfo, error)
 }
@@ -49,9 +49,10 @@ type NodeInfo struct {
 	// Overall node information.
 	node *v1.Node
 
 	pods []*v1.Pod
 	podsWithAffinity []*v1.Pod
+	podsWithRequiredAntiAffinity []*v1.Pod
 	usedPorts HostPortInfo
 
 	// Total requested resources of all pods on this node. This includes assumed
 	// pods, which scheduler has sent for binding, but may not be scheduled yet.
@@ -339,6 +340,14 @@ func (n *NodeInfo) PodsWithAffinity() []*v1.Pod {
 	return n.podsWithAffinity
 }
 
+// PodsWithRequiredAntiAffinity return all pods with required anti-affinity constraints on this node.
+func (n *NodeInfo) PodsWithRequiredAntiAffinity() []*v1.Pod {
+	if n == nil {
+		return nil
+	}
+	return n.podsWithRequiredAntiAffinity
+}
+
 // AllowedPodNumber returns the number of the allowed pods on this node.
 func (n *NodeInfo) AllowedPodNumber() int {
 	if n == nil || n.allocatableResource == nil {
@@ -445,6 +454,9 @@ func (n *NodeInfo) Clone() *NodeInfo {
 	if len(n.podsWithAffinity) > 0 {
 		clone.podsWithAffinity = append([]*v1.Pod(nil), n.podsWithAffinity...)
 	}
+	if len(n.podsWithRequiredAntiAffinity) > 0 {
+		clone.podsWithRequiredAntiAffinity = append([]*v1.Pod(nil), n.podsWithRequiredAntiAffinity...)
+	}
 	if len(n.taints) > 0 {
 		clone.taints = append([]v1.Taint(nil), n.taints...)
 	}
@@ -477,6 +489,11 @@ func hasPodAffinityConstraints(pod *v1.Pod) bool {
 	return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil)
 }
 
+func hasRequiredPodAntiAffinityConstraints(pod *v1.Pod) bool {
+	affinity := pod.Spec.Affinity
+	return affinity != nil && affinity.PodAntiAffinity != nil && len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0
+}
+
 // AddPod adds pod information to this NodeInfo.
 func (n *NodeInfo) AddPod(pod *v1.Pod) {
 	res, non0CPU, non0Mem := calculateResource(pod)
@@ -495,6 +512,9 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
 	if hasPodAffinityConstraints(pod) {
 		n.podsWithAffinity = append(n.podsWithAffinity, pod)
 	}
+	if hasRequiredPodAntiAffinityConstraints(pod) {
+		n.podsWithRequiredAntiAffinity = append(n.podsWithRequiredAntiAffinity, pod)
+	}
 
 	// Consume ports when pods added.
 	n.UpdateUsedPorts(pod, true)
@@ -502,33 +522,45 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
 	n.generation = nextGeneration()
 }
 
-// RemovePod subtracts pod information from this NodeInfo.
-func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
-	k1, err := GetPodKey(pod)
-	if err != nil {
-		return err
-	}
-
-	for i := range n.podsWithAffinity {
-		k2, err := GetPodKey(n.podsWithAffinity[i])
+func removeFromSlice(s []*v1.Pod, k string) []*v1.Pod {
+	for i := range s {
+		k2, err := GetPodKey(s[i])
 		if err != nil {
 			klog.Errorf("Cannot get pod key, err: %v", err)
 			continue
 		}
-		if k1 == k2 {
+		if k == k2 {
 			// delete the element
-			n.podsWithAffinity[i] = n.podsWithAffinity[len(n.podsWithAffinity)-1]
-			n.podsWithAffinity = n.podsWithAffinity[:len(n.podsWithAffinity)-1]
+			s[i] = s[len(s)-1]
+			s = s[:len(s)-1]
 			break
 		}
 	}
+	return s
+}
+
+// RemovePod subtracts pod information from this NodeInfo.
+func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
+	k, err := GetPodKey(pod)
+	if err != nil {
+		return err
+	}
+
+	if hasPodAffinityConstraints(pod) {
+		n.podsWithAffinity = removeFromSlice(n.podsWithAffinity, k)
+	}
+	if hasRequiredPodAntiAffinityConstraints(pod) {
+		n.podsWithRequiredAntiAffinity = removeFromSlice(n.podsWithRequiredAntiAffinity, k)
+	}
 
 	for i := range n.pods {
 		k2, err := GetPodKey(n.pods[i])
 		if err != nil {
 			klog.Errorf("Cannot get pod key, err: %v", err)
 			continue
 		}
-		if k1 == k2 {
+		if k == k2 {
 			// delete the element
 			n.pods[i] = n.pods[len(n.pods)-1]
 			n.pods = n.pods[:len(n.pods)-1]
@@ -563,6 +595,9 @@ func (n *NodeInfo) resetSlicesIfEmpty() {
 	if len(n.podsWithAffinity) == 0 {
 		n.podsWithAffinity = nil
 	}
+	if len(n.podsWithRequiredAntiAffinity) == 0 {
+		n.podsWithRequiredAntiAffinity = nil
+	}
 	if len(n.pods) == 0 {
 		n.pods = nil
 	}
@@ -241,7 +241,7 @@ type Cloud struct {
 
 	ResourceRequestBackoff wait.Backoff
 	metadata *InstanceMetadataService
-	vmSet VMSet
+	VMSet VMSet
 
 	// ipv6DualStack allows overriding for unit testing. It's normally initialized from featuregates
 	ipv6DualStackEnabled bool
@@ -484,12 +484,12 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro
 	}
 
 	if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
-		az.vmSet, err = newScaleSet(az)
+		az.VMSet, err = newScaleSet(az)
 		if err != nil {
 			return err
 		}
 	} else {
-		az.vmSet = newAvailabilitySet(az)
+		az.VMSet = newAvailabilitySet(az)
 	}
 
 	az.vmCache, err = az.newVMCache()
@@ -111,7 +111,7 @@ func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]st
 	var privateIPs []string
 	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
 		var retryErr error
-		privateIPs, retryErr = az.vmSet.GetPrivateIPsByNodeName(string(nodeName))
+		privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName))
 		if retryErr != nil {
 			// won't retry since the instance doesn't exist on Azure.
 			if retryErr == cloudprovider.InstanceNotFound {
|
@ -135,7 +135,7 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string,
|
||||||
var ip, publicIP string
|
var ip, publicIP string
|
||||||
err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
|
err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
|
||||||
var retryErr error
|
var retryErr error
|
||||||
ip, publicIP, retryErr = az.vmSet.GetIPByNodeName(string(name))
|
ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name))
|
||||||
if retryErr != nil {
|
if retryErr != nil {
|
||||||
klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
|
klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
|
||||||
return false, nil
|
return false, nil
|
||||||
|
|
|
@@ -49,7 +49,9 @@ const (
 	errLeaseFailed = "AcquireDiskLeaseFailed"
 	errLeaseIDMissing = "LeaseIdMissing"
 	errContainerNotFound = "ContainerNotFound"
-	errDiskBlobNotFound = "DiskBlobNotFound"
+	errStatusCode400 = "statuscode=400"
+	errInvalidParameter = `code="invalidparameter"`
+	errTargetInstanceIds = `target="instanceids"`
 	sourceSnapshot = "snapshot"
 	sourceVolume = "volume"
 
@@ -90,15 +92,15 @@ type controllerCommon struct {
 
 // getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
 func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) {
-	// 1. vmType is standard, return cloud.vmSet directly.
+	// 1. vmType is standard, return cloud.VMSet directly.
 	if c.cloud.VMType == vmTypeStandard {
-		return c.cloud.vmSet, nil
+		return c.cloud.VMSet, nil
 	}
 
 	// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
-	ss, ok := c.cloud.vmSet.(*scaleSet)
+	ss, ok := c.cloud.VMSet.(*scaleSet)
 	if !ok {
-		return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
+		return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType)
 	}
 
 	// 3. If the node is managed by availability set, then return ss.availabilitySet.
@@ -214,24 +216,32 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
 	c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
 	c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))
 
-	if err != nil && retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff {
-		klog.V(2).Infof("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err)
-		retryErr := kwait.ExponentialBackoff(c.cloud.RequestBackoff(), func() (bool, error) {
-			c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
-			c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching")
-			err := vmset.DetachDisk(diskName, diskURI, nodeName)
-			c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
-			c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))
+	if err != nil {
+		if isInstanceNotFoundError(err) {
+			// if host doesn't exist, no need to detach
+			klog.Warningf("azureDisk - got InstanceNotFoundError(%v), DetachDisk(%s) will assume disk is already detached",
+				err, diskURI)
+			return nil
+		}
+		if retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff {
+			klog.Warningf("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err)
+			retryErr := kwait.ExponentialBackoff(c.cloud.RequestBackoff(), func() (bool, error) {
+				c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
+				c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching")
+				err := vmset.DetachDisk(diskName, diskURI, nodeName)
+				c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
+				c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))
 
 				retriable := false
 				if err != nil && retry.IsErrorRetriable(err) {
 					retriable = true
+				}
+				return !retriable, err
+			})
+			if retryErr != nil {
+				err = retryErr
+				klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err)
 			}
-			return !retriable, err
-		})
-		if retryErr != nil {
-			err = retryErr
-			klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err)
 		}
 	}
 	if err != nil {
@@ -426,3 +436,8 @@ func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourc
 		SourceResourceID: &sourceResourceID,
 	}, nil
 }
+
+func isInstanceNotFoundError(err error) bool {
+	errMsg := strings.ToLower(err.Error())
+	return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds)
+}
@@ -142,7 +142,11 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
 			(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
 			// found the disk
 			klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
-			disks[i].ToBeDetached = to.BoolPtr(true)
+			if strings.EqualFold(as.cloud.Environment.Name, "AZURESTACKCLOUD") {
+				disks = append(disks[:i], disks[i+1:]...)
+			} else {
+				disks[i].ToBeDetached = to.BoolPtr(true)
+			}
 			bFoundDisk = true
 			break
 		}
@@ -147,7 +147,11 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
 			(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
 			// found the disk
 			klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
-			disks[i].ToBeDetached = to.BoolPtr(true)
+			if strings.EqualFold(ss.cloud.Environment.Name, "AZURESTACKCLOUD") {
+				disks = append(disks[:i], disks[i+1:]...)
+			} else {
+				disks[i].ToBeDetached = to.BoolPtr(true)
+			}
 			bFoundDisk = true
 			break
 		}
@@ -761,6 +761,14 @@ func (fDC *fakeDisksClient) Get(ctx context.Context, resourceGroupName string, d
 		errors.New("Not such Disk"))
 }
 
+func (fDC *fakeDisksClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
+	return []compute.Disk{}, nil
+}
+
+func (fDC *fakeDisksClient) Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
+	return nil
+}
+
 // GetTestCloud returns a fake azure cloud for unit tests in Azure related CSI drivers
 func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
 	az = &Cloud{
@@ -800,7 +808,7 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
 	az.VirtualMachineScaleSetsClient = newFakeVirtualMachineScaleSetsClient()
 	az.VirtualMachineScaleSetVMsClient = newFakeVirtualMachineScaleSetVMsClient()
 	az.VirtualMachinesClient = newFakeAzureVirtualMachinesClient()
-	az.vmSet = newAvailabilitySet(az)
+	az.VMSet = newAvailabilitySet(az)
 	az.vmCache, _ = az.newVMCache()
 	az.lbCache, _ = az.newLBCache()
 	az.nsgCache, _ = az.newNSGCache()
@@ -94,11 +94,11 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
 
 	// Not local instance, get addresses from Azure ARM API.
 	if !isLocalInstance {
-		if az.vmSet != nil {
+		if az.VMSet != nil {
 			return addressGetter(name)
 		}
 
-		// vmSet == nil indicates credentials are not provided.
+		// VMSet == nil indicates credentials are not provided.
 		return nil, fmt.Errorf("no credentials provided for Azure cloud provider")
 	}
 
@@ -164,7 +164,7 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin
 		return nil, nil
 	}
 
-	name, err := az.vmSet.GetNodeNameByProviderID(providerID)
+	name, err := az.VMSet.GetNodeNameByProviderID(providerID)
 	if err != nil {
 		return nil, err
 	}
@@ -185,7 +185,7 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri
 		return true, nil
 	}
 
-	name, err := az.vmSet.GetNodeNameByProviderID(providerID)
+	name, err := az.VMSet.GetNodeNameByProviderID(providerID)
 	if err != nil {
 		if err == cloudprovider.InstanceNotFound {
 			return false, nil
@@ -210,7 +210,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st
 		return false, nil
 	}
 
-	nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
+	nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
 	if err != nil {
 		// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
 		if err == cloudprovider.InstanceNotFound {
@@ -220,7 +220,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st
 		return false, err
 	}
 
-	powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName))
+	powerStatus, err := az.VMSet.GetPowerStatusByNodeName(string(nodeName))
 	if err != nil {
 		// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
 		if err == cloudprovider.InstanceNotFound {
@@ -287,8 +287,8 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
 
 	// Not local instance, get instanceID from Azure ARM API.
 	if !isLocalInstance {
-		if az.vmSet != nil {
-			return az.vmSet.GetInstanceIDByNodeName(nodeName)
+		if az.VMSet != nil {
+			return az.VMSet.GetInstanceIDByNodeName(nodeName)
 		}
 
 		// vmSet == nil indicates credentials are not provided.
@@ -317,7 +317,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
 		return az.getVmssMachineID(subscriptionID, resourceGroup, ssName, instanceID), nil
 	}
 
-	return az.vmSet.GetInstanceIDByNodeName(nodeName)
+	return az.VMSet.GetInstanceIDByNodeName(nodeName)
 }
 
 // InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
@@ -334,7 +334,7 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string
 		return "", nil
 	}
 
-	name, err := az.vmSet.GetNodeNameByProviderID(providerID)
+	name, err := az.VMSet.GetNodeNameByProviderID(providerID)
 	if err != nil {
 		return "", err
 	}
@@ -372,8 +372,8 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
 		return "", err
 	}
 	if !isLocalInstance {
-		if az.vmSet != nil {
-			return az.vmSet.GetInstanceTypeByNodeName(string(name))
+		if az.VMSet != nil {
+			return az.VMSet.GetInstanceTypeByNodeName(string(name))
 		}
 
 		// vmSet == nil indicates credentials are not provided.
@@ -385,7 +385,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
 		}
 	}
 
-	return az.vmSet.GetInstanceTypeByNodeName(string(name))
+	return az.VMSet.GetInstanceTypeByNodeName(string(name))
 }
 
 // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
@@ -36,6 +36,7 @@ import (
 	servicehelpers "k8s.io/cloud-provider/service/helpers"
 	"k8s.io/klog"
 	azcache "k8s.io/legacy-cloud-providers/azure/cache"
+	"k8s.io/legacy-cloud-providers/azure/retry"
 	utilnet "k8s.io/utils/net"
 )
 
@@ -216,7 +217,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
 	klog.V(5).Infof("Delete service (%s): START clusterName=%q", serviceName, clusterName)
 
 	serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal)
-	if err != nil {
+	if err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) {
 		return err
 	}
 
@@ -225,7 +226,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
 		return err
 	}
 
-	if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil {
+	if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) {
 		return err
 	}
 
@@ -258,7 +259,7 @@ func (az *Cloud) getLoadBalancerResourceGroup() string {
 func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) {
 	isInternal := requiresInternalLoadBalancer(service)
 	var defaultLB *network.LoadBalancer
-	primaryVMSetName := az.vmSet.GetPrimaryVMSetName()
+	primaryVMSetName := az.VMSet.GetPrimaryVMSetName()
 	defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal)
 
 	existingLBs, err := az.ListLB(service)
@@ -331,7 +332,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
 	isInternal := requiresInternalLoadBalancer(service)
 	serviceName := getServiceName(service)
 	klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
-	vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
+	vmSetNames, err := az.VMSet.GetVMSetNames(service, nodes)
 	if err != nil {
 		klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
 		return nil, false, err
@@ -937,7 +938,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 			// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
 			vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
 			klog.V(10).Infof("EnsureBackendPoolDeleted(%s,%s) for service %s: start", lbBackendPoolID, vmSetName, serviceName)
-			err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
+			err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
 			if err != nil {
 				klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err)
 				return nil, err
@@ -981,7 +982,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 		vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
 		// Etag would be changed when updating backend pools, so invalidate lbCache after it.
 		defer az.lbCache.Delete(lbName)
-		err := az.vmSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal)
+		err := az.VMSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal)
 		if err != nil {
 			return nil, err
 		}
@@ -287,11 +287,15 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan
 		return newSizeQuant, nil
 	}
 
-	result.DiskProperties.DiskSizeGB = &requestGiB
+	diskParameter := compute.DiskUpdate{
+		DiskUpdateProperties: &compute.DiskUpdateProperties{
+			DiskSizeGB: &requestGiB,
+		},
+	}
 
 	ctx, cancel = getContextWithCancel()
 	defer cancel()
-	if rerr := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, result); rerr != nil {
+	if rerr := c.common.cloud.DisksClient.Update(ctx, resourceGroup, diskName, diskParameter); rerr != nil {
 		return oldSize, rerr.Error()
 	}
 
@@ -135,7 +135,7 @@ func (az *Cloud) getNetworkResourceSubscriptionID() string {
 func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) {
 	vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
 	if strings.EqualFold(clusterName, vmSetName) {
-		vmSetName = az.vmSet.GetPrimaryVMSetName()
+		vmSetName = az.VMSet.GetPrimaryVMSetName()
 	}
 
 	return vmSetName
@@ -150,7 +150,7 @@ func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string,
 		clusterName = az.LoadBalancerName
 	}
 	lbNamePrefix := vmSetName
-	if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
+	if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
 		lbNamePrefix = clusterName
 	}
 	if isInternal {
@@ -732,7 +732,7 @@ func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types.
 			return "", "", "", nil, nil
 		}
 
-		klog.Errorf("error: az.EnsureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
+		klog.Errorf("error: az.EnsureHostInPool(%s), az.VMSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
 		return "", "", "", nil, err
 	}
 
@@ -531,17 +531,21 @@ func (ss *scaleSet) GetPrivateIPsByNodeName(nodeName string) ([]string, error) {
 
 // This returns the full identifier of the primary NIC for the given VM.
 func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) {
+	if machine.NetworkProfile == nil || machine.NetworkProfile.NetworkInterfaces == nil {
+		return "", fmt.Errorf("failed to find the network interfaces for vm %s", to.String(machine.Name))
+	}
+
 	if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
 		return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
 	}
 
 	for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
-		if *ref.Primary {
+		if to.Bool(ref.Primary) {
 			return *ref.ID, nil
 		}
 	}
 
-	return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
+	return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", to.String(machine.Name))
 }
 
 // getVmssMachineID returns the full identifier of a vmss virtual machine.
@@ -659,6 +663,9 @@ func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compu
 	allVMs, rerr := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, string(compute.InstanceView))
 	if rerr != nil {
 		klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", rerr)
+		if rerr.IsNotFound() {
+			return nil, cloudprovider.InstanceNotFound
+		}
 		return nil, rerr.Error()
 	}
 
@@ -87,7 +87,7 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
 	if err != nil {
 		return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel")
 	}
-	return az.vmSet.GetZoneByNodeName(strings.ToLower(hostname))
+	return az.VMSet.GetZoneByNodeName(strings.ToLower(hostname))
 }
 
 // GetZoneByProviderID implements Zones.GetZoneByProviderID
@@ -104,7 +104,7 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl
 		return cloudprovider.Zone{}, nil
 	}
 
-	nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
+	nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
 	if err != nil {
 		return cloudprovider.Zone{}, err
 	}
@@ -126,5 +126,5 @@ func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName)
 		return cloudprovider.Zone{}, nil
 	}
 
-	return az.vmSet.GetZoneByNodeName(string(nodeName))
+	return az.VMSet.GetZoneByNodeName(string(nodeName))
 }
@@ -19,6 +19,7 @@ go_library(
         "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library",
         "//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
         "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
+        "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -31,6 +32,7 @@ go_test(
         "//staging/src/k8s.io/legacy-cloud-providers/azure/clients:go_default_library",
         "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient:go_default_library",
        "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient:go_default_library",
+        "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library",
         "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library",
         "//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
         "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go (generated, vendored, 193 lines changed)
@@ -20,12 +20,14 @@ package diskclient
 
 import (
 	"context"
+	"fmt"
 	"net/http"
 	"time"
 
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
 	"github.com/Azure/go-autorest/autorest"
 	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
 
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/klog"
@@ -204,6 +206,74 @@ func (c *Client) createOrUpdateResponder(resp *http.Response) (*compute.Disk, *r
 	return result, retry.GetError(resp, err)
 }
 
+// Update creates or updates a Disk.
+func (c *Client) Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
+	mc := metrics.NewMetricContext("disks", "update", resourceGroupName, c.subscriptionID, "")
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return retry.GetRateLimitError(true, "DiskUpdate")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("DiskUpdate", "client throttled", c.RetryAfterWriter)
+		return rerr
+	}
+
+	rerr := c.updateDisk(ctx, resourceGroupName, diskName, diskParameter)
+	mc.Observe(rerr.Error())
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return rerr
+	}
+
+	return nil
+}
+
+// updateDisk updates a Disk.
+func (c *Client) updateDisk(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Compute/disks",
+		diskName,
+	)
+
+	response, rerr := c.armClient.PatchResource(ctx, resourceID, diskParameter)
+	defer c.armClient.CloseResponse(ctx, response)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.request", resourceID, rerr.Error())
+		return rerr
+	}
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr = c.updateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.respond", resourceID, rerr.Error())
+			return rerr
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) updateResponder(resp *http.Response) (*compute.Disk, *retry.Error) {
+	result := &compute.Disk{}
+	err := autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result))
+	result.Response = autorest.Response{Response: resp}
+	return result, retry.GetError(resp, err)
+}
+
 // Delete deletes a Disk by name.
 func (c *Client) Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error {
 	mc := metrics.NewMetricContext("disks", "delete", resourceGroupName, c.subscriptionID, "")
@@ -246,3 +316,126 @@ func (c *Client) deleteDisk(ctx context.Context, resourceGroupName string, diskN
 
 	return c.armClient.DeleteResource(ctx, resourceID, "")
 }
+
+// ListByResourceGroup lists all the disks under a resource group.
+func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
+	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks",
+		autorest.Encode("path", c.subscriptionID),
+		autorest.Encode("path", resourceGroupName))
+
+	result := make([]compute.Disk, 0)
+	page := &DiskListPage{}
+	page.fn = c.listNextResults
+
+	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.request", resourceID, rerr.Error())
+		return result, rerr
+	}
+
+	var err error
+	page.dl, err = c.listResponder(resp)
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.respond", resourceID, err)
+		return result, retry.GetError(resp, err)
+	}
+
+	for page.NotDone() {
+		result = append(result, *page.Response().Value...)
+		if err = page.NextWithContext(ctx); err != nil {
+			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.next", resourceID, err)
+			return result, retry.GetError(page.Response().Response.Response, err)
+		}
+	}
+
+	return result, nil
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (c *Client) listNextResults(ctx context.Context, lastResults compute.DiskList) (result compute.DiskList, err error) {
+	req, err := c.diskListPreparer(ctx, lastResults)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "diskclient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, rerr := c.armClient.Send(ctx, req)
+	defer c.armClient.CloseResponse(ctx, resp)
+	if rerr != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(rerr.Error(), "diskclient", "listNextResults", resp, "Failure sending next results request")
+	}
+
+	result, err = c.listResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "diskclient", "listNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// listResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (c *Client) listResponder(resp *http.Response) (result compute.DiskList, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+func (c *Client) diskListPreparer(ctx context.Context, lr compute.DiskList) (*http.Request, error) {
+	if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
+		return nil, nil
+	}
+	return autorest.Prepare((&http.Request{}).WithContext(ctx),
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(lr.NextLink)))
+}
+
+// DiskListPage contains a page of Disk values.
+type DiskListPage struct {
+	fn func(context.Context, compute.DiskList) (compute.DiskList, error)
+	dl compute.DiskList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) {
+	next, err := page.fn(ctx, page.dl)
+	if err != nil {
+		return err
+	}
+	page.dl = next
+	return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *DiskListPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page DiskListPage) NotDone() bool {
+	return !page.dl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page DiskListPage) Response() compute.DiskList {
+	return page.dl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page DiskListPage) Values() []compute.Disk {
+	if page.dl.IsEmpty() {
+		return nil
+	}
+	return *page.dl.Value
+}
@@ -40,6 +40,12 @@ type Interface interface {
 	// CreateOrUpdate creates or updates a Disk.
 	CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error
 
+	// Update updates a Disk.
+	Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error
+
 	// Delete deletes a Disk by name.
 	Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error
+
+	// ListByResourceGroup lists all the disks under a resource group.
+	ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error)
 }
@@ -86,6 +86,7 @@ func registerAPIMetrics(attributes ...string) *apiCallMetrics {
 		&metrics.HistogramOpts{
 			Name: "cloudprovider_azure_api_request_duration_seconds",
 			Help: "Latency of an Azure API call",
+			Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50, 120, 300, 600, 1200},
 			StabilityLevel: metrics.ALPHA,
 		},
 		attributes,
@@ -89,6 +89,15 @@ func (err *Error) IsThrottled() bool {
 	return err.HTTPStatusCode == http.StatusTooManyRequests || err.RetryAfter.After(now())
 }
 
+// IsNotFound returns true the if the requested object wasn't found
+func (err *Error) IsNotFound() bool {
+	if err == nil {
+		return false
+	}
+
+	return err.HTTPStatusCode == http.StatusNotFound
+}
+
 // NewError creates a new Error.
 func NewError(retriable bool, err error) *Error {
 	return &Error{
@@ -286,3 +295,20 @@ func IsErrorRetriable(err error) bool {
 
 	return strings.Contains(err.Error(), "Retriable: true")
 }
+
+// HasStatusForbiddenOrIgnoredError return true if the given error code is part of the error message
+// This should only be used when trying to delete resources
+func HasStatusForbiddenOrIgnoredError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	if strings.Contains(err.Error(), fmt.Sprintf("HTTPStatusCode: %d", http.StatusNotFound)) {
+		return true
+	}
+
+	if strings.Contains(err.Error(), fmt.Sprintf("HTTPStatusCode: %d", http.StatusForbidden)) {
+		return true
+	}
+	return false
+}
@@ -1140,7 +1140,7 @@ gopkg.in/square/go-jose.v2/jwt
 gopkg.in/warnings.v0
 # gopkg.in/yaml.v2 v2.2.8
 gopkg.in/yaml.v2
-# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.9-k3s1
+# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
 k8s.io/api/admissionregistration/v1
|
||||||
k8s.io/api/storage/v1
|
k8s.io/api/storage/v1
|
||||||
k8s.io/api/storage/v1alpha1
|
k8s.io/api/storage/v1alpha1
|
||||||
k8s.io/api/storage/v1beta1
|
k8s.io/api/storage/v1beta1
|
||||||
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.9-k3s1
|
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.10-k3s1
|
||||||
k8s.io/apiextensions-apiserver/pkg/apihelpers
|
k8s.io/apiextensions-apiserver/pkg/apihelpers
|
||||||
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
|
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
|
||||||
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
|
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
|
||||||
|
@ -1224,7 +1224,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
|
||||||
k8s.io/apiextensions-apiserver/pkg/registry/customresource
|
k8s.io/apiextensions-apiserver/pkg/registry/customresource
|
||||||
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
|
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
|
||||||
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
|
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
|
||||||
# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.9-k3s1
|
# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.10-k3s1
|
||||||
k8s.io/apimachinery/pkg/api/equality
|
k8s.io/apimachinery/pkg/api/equality
|
||||||
k8s.io/apimachinery/pkg/api/errors
|
k8s.io/apimachinery/pkg/api/errors
|
||||||
k8s.io/apimachinery/pkg/api/meta
|
k8s.io/apimachinery/pkg/api/meta
|
||||||
|
@ -1286,7 +1286,7 @@ k8s.io/apimachinery/pkg/watch
|
||||||
k8s.io/apimachinery/third_party/forked/golang/json
|
k8s.io/apimachinery/third_party/forked/golang/json
|
||||||
k8s.io/apimachinery/third_party/forked/golang/netutil
|
k8s.io/apimachinery/third_party/forked/golang/netutil
|
||||||
k8s.io/apimachinery/third_party/forked/golang/reflect
|
k8s.io/apimachinery/third_party/forked/golang/reflect
|
||||||
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.9-k3s1
|
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.10-k3s1
|
||||||
k8s.io/apiserver/pkg/admission
|
k8s.io/apiserver/pkg/admission
|
||||||
k8s.io/apiserver/pkg/admission/configuration
|
k8s.io/apiserver/pkg/admission/configuration
|
||||||
k8s.io/apiserver/pkg/admission/initializer
|
k8s.io/apiserver/pkg/admission/initializer
|
||||||
|
@ -1416,7 +1416,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
|
||||||
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
|
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
|
||||||
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
|
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
|
||||||
k8s.io/apiserver/plugin/pkg/authorizer/webhook
|
k8s.io/apiserver/plugin/pkg/authorizer/webhook
|
||||||
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.9-k3s1
|
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.10-k3s1
|
||||||
k8s.io/cli-runtime/pkg/genericclioptions
|
k8s.io/cli-runtime/pkg/genericclioptions
|
||||||
k8s.io/cli-runtime/pkg/kustomize
|
k8s.io/cli-runtime/pkg/kustomize
|
||||||
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
|
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
|
||||||
|
@@ -1429,7 +1429,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.9-k3s1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.10-k3s1
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/cached
 k8s.io/client-go/discovery/cached/disk
@@ -1663,7 +1663,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.9-k3s1
+# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.10-k3s1
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
 k8s.io/cloud-provider/node/helpers
@@ -1671,13 +1671,13 @@ k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.9-k3s1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.10-k3s1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.9-k3s1
+# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.10-k3s1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -1692,7 +1692,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.9-k3s1
+# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.10-k3s1
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
 k8s.io/component-base/codec
@@ -1710,10 +1710,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
 k8s.io/component-base/metrics/testutil
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.9-k3s1
+# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.10-k3s1
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.9-k3s1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.10-k3s1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@@ -1728,7 +1728,7 @@ k8s.io/gengo/types
 k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.9-k3s1
+# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.10-k3s1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -1756,7 +1756,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.9-k3s1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.10-k3s1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
 k8s.io/kube-openapi/pkg/aggregator
@@ -1767,14 +1767,14 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.9-k3s1
+# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.10-k3s1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.9-k3s1
+# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.10-k3s1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1alpha1
 k8s.io/kube-scheduler/config/v1alpha2
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.9-k3s1
+# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.10-k3s1
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
 k8s.io/kubectl/pkg/cmd/annotate
@@ -1849,11 +1849,11 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.9-k3s1
+# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.10-k3s1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
 k8s.io/kubelet/pkg/apis/pluginregistration/v1
-# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.9-k3s1
+# k8s.io/kubernetes v1.18.10 => github.com/rancher/kubernetes v1.18.10-k3s1
 k8s.io/kubernetes/cmd/cloud-controller-manager/app
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@@ -1873,7 +1873,6 @@ k8s.io/kubernetes/cmd/kube-scheduler/app/config
 k8s.io/kubernetes/cmd/kube-scheduler/app/options
 k8s.io/kubernetes/cmd/kubelet/app
 k8s.io/kubernetes/cmd/kubelet/app/options
-k8s.io/kubernetes/pkg/api/endpoints
 k8s.io/kubernetes/pkg/api/legacyscheme
 k8s.io/kubernetes/pkg/api/persistentvolume
 k8s.io/kubernetes/pkg/api/persistentvolumeclaim
@@ -2597,7 +2596,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
 k8s.io/kubernetes/third_party/forked/ipvs
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.9-k3s1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.10-k3s1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -2628,7 +2627,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.9-k3s1
+# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.10-k3s1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
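
For reference, each "# module version => replacement version" line above is the header that vendor/modules.txt records for a module pulled in through a replace directive, and the lines beneath it list the packages vendored from that module. As a hedged illustration (not part of this commit), the Go sketch below shows how the same replacement information can be read back out of a binary built from this module using the standard runtime/debug.ReadBuildInfo API, assuming the binary was built with module support so that build info is embedded:

package main

import (
	"fmt"
	"runtime/debug"
)

// Print every module replacement recorded in the running binary's embedded
// build info. With the replacements in this repository, an entry such as
// "k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.10-k3s1"
// would be expected in the output. Illustrative sketch only.
func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no module build info embedded in this binary")
		return
	}
	for _, dep := range info.Deps {
		if dep.Replace == nil {
			continue // dependency used as required; no replace directive applied
		}
		fmt.Printf("%s %s => %s %s\n",
			dep.Path, dep.Version, dep.Replace.Path, dep.Replace.Version)
	}
}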