mirror of https://github.com/k3s-io/k3s

Update k8s 1.18.3

commit f1a01f4afb
parent 25d43177ff
go.mod (50 changed lines)
@@ -32,31 +32,31 @@ replace (
 	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1
+	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1
-	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1
+	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1
-	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1
+	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1
-	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1
+	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1
-	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1
+	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1
-	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1
+	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1
-	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1
+	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1
-	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1
+	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1
-	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1
+	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1
-	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1
+	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1
-	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1
+	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1
-	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1
+	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1
-	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1
+	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1
-	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1
+	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1
-	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1
+	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1
-	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1
+	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1
-	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1
+	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1
-	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1
+	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1
-	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.2-k3s.1
+	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.3-k3s1
-	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1
+	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1
-	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1
+	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1
-	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.2-k3s.1
+	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.3-k3s1
-	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.2-k3s.1
+	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.3-k3s1
-	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.2-k3s.1
+	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.3-k3s1
-	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.2-k3s.1
+	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.3-k3s1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 )
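
Note: each of these pins can be reproduced with stock Go tooling, e.g. `go mod edit -replace k8s.io/api=github.com/rancher/kubernetes/staging/src/k8s.io/api@v1.18.3-k3s1`. The diff simply moves every rancher-forked staging module from the v1.18.2-k3s.1 tag to v1.18.3-k3s1; note the new tag scheme also drops the dot before the build suffix.
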
go.sum (92 changed lines)
@@ -182,8 +182,6 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 h1:8GFZB1KesbMy2X2zTiJyAuwCow+U1GT0ueD42p59y4k=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
@@ -641,49 +639,49 @@ github.com/rancher/helm-controller v0.6.0 h1:nFptBZFWpHga65M6bP04BZGLlzeMgezAXds
 github.com/rancher/helm-controller v0.6.0/go.mod h1:ZylsxIMGNADRPRNW+NiBWhrwwks9vnKLQiCHYWb6Bi0=
 github.com/rancher/kine v0.4.0 h1:1IhWy3TzjExG8xnj46eyUEWdzqNAD1WrgL4eEBKm6Uc=
 github.com/rancher/kine v0.4.0/go.mod h1:IImtCJ68AIkE+VY/kUI0NkyJL5q5WzO8QvMsSXqbrpA=
-github.com/rancher/kubernetes v1.18.2-k3s.1 h1:LhWNObWF7dL/+T57LkYpuRKtsCBpt0P5G6dRVFG+Ncs=
+github.com/rancher/kubernetes v1.18.3-k3s1 h1:QYh2MY+odOzBQedwClFdX1tRtYQADaFRWi+etbBJvuU=
-github.com/rancher/kubernetes v1.18.2-k3s.1/go.mod h1:z8xjOOO1Ljz+TaHpOxVGC7cxtF32TesIamoQ+BZrVS0=
+github.com/rancher/kubernetes v1.18.3-k3s1/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1 h1:tYDY9g8+xLwUcsG9T6Xg7cBkO/vgU6yv7cQKqUN6NDE=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1 h1:wNUROW7IOAbW0pCNtdKvDWIRUKE5pk3jffttSMyGn8s=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1 h1:mjOCIsG8WfV2LROU8xLVGEZ9G5uWrD/xHNc87rLPss8=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1 h1:arrGaUaK4WJ/x0TMSw7wxMDlK+akOfq2Yk4MjMdnkqQ=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1/go.mod h1:tMuEHO85+WtdJsLBJ1U4bh7oB23v/D4vP0BtL39qxM4=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1 h1:w2RnTwBNOi1QHYFoXbFLXx3Gaw3pPbplxbUPpl76hjw=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1 h1:3rwFiKyQ7FL2hdqQNdJnP0BaM0a8L1tmGf+h1Nrqeno=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1/go.mod h1:3Y3wDqxL/YFcgzyFD7r80Z6lMxpRJb/V+dds+b7A2NE=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1/go.mod h1:0LbhSvBf6oDO/G0IsPYTC3eGykX9kRjGqE1+90am7Pg=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1 h1:/6mV1HGv/QoAjFCPlSZfkdeJagmT8gYwiaOsXxjTZEM=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1 h1:Ai2duIKWpX9IxW/sQL1ojbAX9KcJ9TPLQsogR9vafJA=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1/go.mod h1:SeOQ7d1nUCULR978oKuSossKyGzova3DlaXEa1zJ1ns=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1/go.mod h1:wYoVKxMBc/Gtl3o5eEhoIy1iS0Zw8kLYIak9mud65gg=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1 h1:SlXTYNBxaWbxSPyHNRQ/epxqixUiokY04Wh+8gBYTXA=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1 h1:TQrvULLk+ESOptqV09QK6zzPu/IV7kJSxLTa9c5OTbE=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1/go.mod h1:M8WtUx89NzNZ4Qx/1diDW/1TSs2Pv9J6//dIYvvtwSs=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1/go.mod h1:e0a+/gPy7PnNaRJHZz5E3lqfMsiJ17sSfvktHyipb3I=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1 h1:qCJO8jfGrmJk7Wn8jfqekOua5PizO/joSQUB89vxLB0=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1 h1:P3kIcwlm5w/XW8HgpyOYxOm70ZfZEtZm3xpHuOnlx6M=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1 h1:PmV2L98GjPbD+0UvMR//4I8DiEraMOEl7fq65OGd4hI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1 h1:czS0txmHj7i7dRqppu6ekwFigMsZUHMMmaSySuRcQSE=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1 h1:xTdvOPd4qckfxaqE0vYTdcVhFlYFN7bcS7xg1bnq9Y4=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1 h1:wjE5mqAyInJ33cx0St7jYWdy97O5hRBlxysQCH7kvU4=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1 h1:UMkQrPLLHpAH+jKNtMBIZw1i2wSuNSgxu7G48WLsoi0=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1 h1:SBCvaudegFLUmSl0rfariVQjGhsH/A0AV2rq8ys3IU4=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1/go.mod h1:Yai6SRJt/nb3VvQw4jKKZBtXRJF/OrswWmfjyF6FqP0=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1 h1:kZyprzrCOdYg1GdB4OPOu7ie2Zyw9ocO6Fa3iG2BhKc=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1 h1:/47POpHTRsfFNc0k2Ruq67vqOtPms5FA9TXo9ci6FZc=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1 h1:ULoh4AB2JiSHA3ELUD56zRh7cnL6/bU8I6AaBbRI/xo=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1 h1:/VofAOz4+KX9zda4+M8WnE5eDn82ezYyBz7HuzUoBq0=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1 h1:1rhSNnADx+2NMudlqoFC1cBjPLblQ0sZeKkiWIUvJZY=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1 h1:+CsRXq96B0ThQuI0x0i975CBcRKYfrUlR4/s+h3vYxU=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1/go.mod h1:BBW+zYPd9dNyl1qZ3U/coU05IW6AvRAbo3s86WKDfzU=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1 h1:j0DihywFZbNqzlEE9UikIXoynvLumJFZNHcPrYTr63E=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1 h1:qDmjyk7BoZO7TcGpU6YKOZVPn84628tC8s0r8Xz/6M0=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1/go.mod h1:EY3DrCVVj6X1xeVtHF/0lht5TZK9YhKLyfe6QBD3QvE=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1 h1:FEyiGSFRKYejw8aRPbOTfIVukL0DkwhgdfmN36zQrBo=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1 h1:Y9ySTyuhR84dJaYzwBHmKxBtHQ2uWIoP9VL4iYCvwUM=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1 h1:fHJ7O2jLBQhDnw7ahecdtmx1l3mJ25fwWtlq3cOPrxw=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1 h1:K1sU4yqMdWaCGj4jEusK+PFowCCUuhTiH3jUriQD5WU=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1 h1:7ZGcqlwBSyLMLVT9r7F9jHRc+dhnlumrHYfblZilkl4=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1 h1:NqvNsqKpXJ7ZAFLOmxj6gvRktx0Oga9DH0M979za6uI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1 h1:oNlYS9R/bsVnlUIeXrbJAxvNPlqhqksJZAoA4eneAdc=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1 h1:j4l5XcOKNqDtqIMrdDJLQtO+4LAcaojGa/etG6lv354=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1/go.mod h1:lLLodYzjtaBEMebmdtYz2Sh+X8/3DB79imNypXbONsE=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1/go.mod h1:eosbAJR16uuWsgirnmlt31NV+ZwZLQsMNbxiRZYbco8=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1 h1:Fl7NvvCzMDsZWYIh2F3MzQB+EPl7Xh0TTFTAw6SZNbo=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1 h1:qqKfrPB2ghGqf/ElQrVmZaVvm+/DSupWAApEe4Zk5Uk=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1 h1:nldhxCsspFtJPzus/aeLcednyDvAesVgu/XIE5Qa6/8=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1 h1:xb3ZtwF3emE38qi8XWjEw+ES4WKe3k4B4Sr8YGFWEHo=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1 h1:ha8xCCbv3iPsXg+TjB+ZHHxxRyuiWWB9bgTDkgHmLCk=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1 h1:r7qvKjbV7XHI3W9a8Jhzsiujrp7d76USez5i1LZNfYc=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
-github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.2-k3s.1/go.mod h1:gpiIUEAyQvSEXKbsH2taOEzkrHXvoZwHuArWgR+DpG8=
+github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.3-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
 github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
 github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
 github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=
@@ -1067,8 +1065,8 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/kube-openapi v0.0.0-20190502190224-411b2483e503/go.mod h1:iU+ZGYsNlvU9XKUSso6SQfKTCCw7lFduMZy26Mgr2Fw=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8=
 k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI=
 k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=

@@ -66,11 +66,36 @@ func Unmarshal(data []byte, v interface{}) error {
 		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
 		return convertSliceNumbers(*v, 0)
 
+	case *interface{}:
+		// Build a decoder from the given data
+		decoder := json.NewDecoder(bytes.NewBuffer(data))
+		// Preserve numbers, rather than casting to float64 automatically
+		decoder.UseNumber()
+		// Run the decode
+		if err := decoder.Decode(v); err != nil {
+			return err
+		}
+		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+		return convertInterfaceNumbers(v, 0)
+
 	default:
 		return json.Unmarshal(data, v)
 	}
 }
 
+func convertInterfaceNumbers(v *interface{}, depth int) error {
+	var err error
+	switch v2 := (*v).(type) {
+	case json.Number:
+		*v, err = convertNumber(v2)
+	case map[string]interface{}:
+		err = convertMapNumbers(v2, depth+1)
+	case []interface{}:
+		err = convertSliceNumbers(v2, depth+1)
+	}
+	return err
+}
+
 // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
 // values which are map[string]interface{} or []interface{} are recursively visited
 func convertMapNumbers(m map[string]interface{}, depth int) error {
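
Note: a standalone sketch (not part of the diff) of what the new *interface{} branch buys. Plain json.Unmarshal decodes every JSON number into float64, which silently rounds integers above 2^53; decoder.UseNumber() keeps them as json.Number so the post-processing above can convert them to int64 losslessly.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`9007199254740993`) // 2^53 + 1: not representable as float64

	var plain interface{}
	if err := json.Unmarshal(data, &plain); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", plain, plain) // float64, rounded to 9.007199254740992e+15

	var preserved interface{}
	dec := json.NewDecoder(bytes.NewBuffer(data))
	dec.UseNumber() // keep numbers as json.Number instead of float64
	if err := dec.Decode(&preserved); err != nil {
		panic(err)
	}
	n, _ := preserved.(json.Number).Int64()
	fmt.Printf("%T %v\n", preserved, n) // json.Number, exact 9007199254740993
}
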

@@ -286,8 +286,9 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance
 }
 
 // BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides
-// an interface to return a timer for backoff, and caller shall backoff until Timer.C returns. If the second Backoff()
-// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained.
+// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff()
+// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in
+// undetermined behavior.
 // The BackoffManager is supposed to be called in a single-threaded environment.
 type BackoffManager interface {
 	Backoff() clock.Timer
@@ -317,7 +318,7 @@ func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Du
 			Steps: math.MaxInt32,
 			Cap:   maxBackoff,
 		},
-		backoffTimer:         c.NewTimer(0),
+		backoffTimer:         nil,
 		initialBackoff:       initBackoff,
 		lastBackoffStart:     c.Now(),
 		backoffResetDuration: resetDuration,
@@ -334,9 +335,14 @@ func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
 	return b.backoff.Step()
 }
 
-// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff.
+// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff.
+// The returned timer must be drained before calling Backoff() the second time
 func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
-	b.backoffTimer.Reset(b.getNextBackoff())
+	if b.backoffTimer == nil {
+		b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
+	} else {
+		b.backoffTimer.Reset(b.getNextBackoff())
+	}
 	return b.backoffTimer
 }
 
@@ -354,7 +360,7 @@ func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.C
 		clock:        c,
 		duration:     duration,
 		jitter:       jitter,
-		backoffTimer: c.NewTimer(0),
+		backoffTimer: nil,
 	}
 }
 
@@ -366,8 +372,15 @@ func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
 	return jitteredPeriod
 }
 
+// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff.
+// The returned timer must be drained before calling Backoff() the second time
 func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
-	j.backoffTimer.Reset(j.getNextBackoff())
+	backoff := j.getNextBackoff()
+	if j.backoffTimer == nil {
+		j.backoffTimer = j.clock.NewTimer(backoff)
+	} else {
+		j.backoffTimer.Reset(backoff)
+	}
 	return j.backoffTimer
 }
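
Note: a hedged usage sketch of the contract the new comments spell out. NewExponentialBackoffManager, clock.RealClock, and Timer.C() are the k8s.io/apimachinery APIs visible in this diff; the retry loop itself is illustrative. With the timer now created lazily on the first Backoff() call, the caller must drain C() between calls:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	bm := wait.NewExponentialBackoffManager(
		100*time.Millisecond, // initial backoff
		2*time.Second,        // backoff cap
		time.Minute,          // reset duration
		2.0,                  // backoff factor
		0.1,                  // jitter
		clock.RealClock{},
	)
	for attempt := 0; attempt < 3; attempt++ {
		fmt.Println("attempt", attempt)
		timer := bm.Backoff()
		<-timer.C() // drain the timer before the next Backoff() call
	}
}
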

@@ -682,6 +682,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler {
 	if c.SecureServing != nil && !c.SecureServing.DisableHTTP2 && c.GoawayChance > 0 {
 		handler = genericfilters.WithProbabilisticGoaway(handler, c.GoawayChance)
 	}
+	handler = genericapifilters.WithCacheControl(handler)
 	handler = genericfilters.WithPanicRecovery(handler)
 	return handler
 }
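
Note: the one functional addition here is WithCacheControl. As a rough sketch of the idea (an assumption about the filter's intent; the real implementation lives in k8s.io/apiserver's filter packages and may differ), such a filter defaults Cache-Control so authenticated API responses are not cached by intermediaries:

package main

import "net/http"

// withCacheControl is an illustrative stand-in for the real filter: it sets a
// conservative Cache-Control default unless the handler already chose one.
func withCacheControl(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if w.Header().Get("Cache-Control") == "" {
			w.Header().Set("Cache-Control", "no-cache, private")
		}
		handler.ServeHTTP(w, req)
	})
}

func main() {
	ok := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte("ok")) })
	http.ListenAndServe("127.0.0.1:8080", withCacheControl(ok))
}
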

@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor     = "1"
 	gitMinor     = "18"
-	gitVersion   = "v1.18.2-k3s.1"
-	gitCommit    = "3d7d34a23ec464c08b81486aeca0b7d1bb6e044c"
+	gitVersion   = "v1.18.3-k3s1"
+	gitCommit    = "e3eaf6c5726840b2e31ea2b0a2ea1c2d40ffed2a"
 	gitTreeState = "clean"
-	buildDate    = "2020-04-19T05:33:19Z"
+	buildDate    = "2020-05-26T21:45:32Z"
 )

@@ -35,7 +35,7 @@ import (
 var (
 	// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
 	// DEPRECATED will be replaced
-	ClusterDefaults = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")}
+	ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
 	// DefaultClientConfig represents the legacy behavior of this package for defaulting
 	// DEPRECATED will be replace
 	DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
@@ -43,6 +43,15 @@ var (
 	}, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
 )
 
+// getDefaultServer returns a default setting for DefaultClientConfig
+// DEPRECATED
+func getDefaultServer() string {
+	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
+		return server
+	}
+	return "http://localhost:8080"
+}
+
 // ClientConfig is used to make it easy to get an api server client
 type ClientConfig interface {
 	// RawConfig returns the merged result of all overrides
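
Note: the defaulting behavior is easiest to see in isolation; this sketch just lifts the new helper out of the diff to show that KUBERNETES_MASTER wins and the legacy localhost default is otherwise preserved.

package main

import (
	"fmt"
	"os"
)

// getDefaultServer, copied from the diff: prefer KUBERNETES_MASTER, keep the
// legacy insecure default otherwise.
func getDefaultServer() string {
	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
		return server
	}
	return "http://localhost:8080"
}

func main() {
	fmt.Println(getDefaultServer()) // http://localhost:8080 when unset
	os.Setenv("KUBERNETES_MASTER", "https://example.internal:6443")
	fmt.Println(getDefaultServer()) // https://example.internal:6443
}
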

@@ -88,6 +88,9 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord)
 	if err != nil {
 		return err
 	}
+	if cml.cm.Annotations == nil {
+		cml.cm.Annotations = make(map[string]string)
+	}
 	cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
 	cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
 	return err
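
Note: the guard matters because assigning into a nil map panics in Go, and a ConfigMap fetched from the API server may carry no annotations at all. A minimal reproduction of the failure mode the fix prevents:

package main

import "fmt"

func main() {
	var annotations map[string]string // nil, like cm.Annotations on a bare ConfigMap
	// annotations["leader"] = "..."  // would panic: assignment to entry in nil map
	if annotations == nil {
		annotations = make(map[string]string)
	}
	annotations["control-plane.alpha.kubernetes.io/leader"] = "{}"
	fmt.Println(annotations)
}
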

@@ -172,8 +172,8 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc
 k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=

@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor     = "1"
 	gitMinor     = "18"
-	gitVersion   = "v1.18.2-k3s.1"
-	gitCommit    = "3d7d34a23ec464c08b81486aeca0b7d1bb6e044c"
+	gitVersion   = "v1.18.3-k3s1"
+	gitCommit    = "e3eaf6c5726840b2e31ea2b0a2ea1c2d40ffed2a"
 	gitTreeState = "clean"
-	buildDate    = "2020-04-19T05:33:19Z"
+	buildDate    = "2020-05-26T21:45:32Z"
 )

@@ -2,6 +2,6 @@
 
 Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes.
 
-This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/csi-api](https://git.k8s.io/kubernetes/staging/src/k8s.io/csi-api) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).
+This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/csi-translation-lib](https://git.k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).
 
 Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information.

@@ -5,9 +5,11 @@ module k8s.io/csi-translation-lib
 go 1.13
 
 require (
+	github.com/stretchr/testify v1.4.0
 	k8s.io/api v0.0.0
 	k8s.io/apimachinery v0.0.0
 	k8s.io/cloud-provider v0.0.0
+	k8s.io/klog v1.0.0
 )
 
 replace (

@@ -156,7 +156,7 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc
 k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
 sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=

@@ -19,6 +19,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/cloud-provider/volume:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -49,5 +50,7 @@ go_test(
     deps = [
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],
 )

@@ -110,22 +110,23 @@ func (t *azureDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
 		return nil, fmt.Errorf("pv is nil or Azure Disk source not defined on pv")
 	}
 
-	azureSource := pv.Spec.PersistentVolumeSource.AzureDisk
-
-	// refer to https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md
-	csiSource := &v1.CSIPersistentVolumeSource{
-		Driver:           AzureDiskDriverName,
-		VolumeHandle:     azureSource.DataDiskURI,
-		ReadOnly:         *azureSource.ReadOnly,
-		FSType:           *azureSource.FSType,
-		VolumeAttributes: map[string]string{azureDiskKind: "Managed"},
-	}
+	var (
+		azureSource = pv.Spec.PersistentVolumeSource.AzureDisk
+
+		// refer to https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md
+		csiSource = &v1.CSIPersistentVolumeSource{
+			Driver:           AzureDiskDriverName,
+			VolumeAttributes: map[string]string{azureDiskKind: "Managed"},
+			VolumeHandle:     azureSource.DataDiskURI,
+		}
+	)
 
 	if azureSource.CachingMode != nil {
 		csiSource.VolumeAttributes[azureDiskCachingMode] = string(*azureSource.CachingMode)
 	}
 
 	if azureSource.FSType != nil {
+		csiSource.FSType = *azureSource.FSType
 		csiSource.VolumeAttributes[azureDiskFSType] = *azureSource.FSType
 	}
 
@@ -133,9 +134,12 @@ func (t *azureDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
 		csiSource.VolumeAttributes[azureDiskKind] = string(*azureSource.Kind)
 	}
 
+	if azureSource.ReadOnly != nil {
+		csiSource.ReadOnly = *azureSource.ReadOnly
+	}
+
 	pv.Spec.PersistentVolumeSource.AzureDisk = nil
 	pv.Spec.PersistentVolumeSource.CSI = csiSource
-	pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes)
 
 	return pv, nil
 }
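
Note: the shape of this change is a classic optional-pointer guard. The old code dereferenced azureSource.ReadOnly and azureSource.FSType unconditionally, and both are optional pointer fields on the in-tree AzureDisk source. A minimal sketch of the hazard (types are simplified stand-ins, not the real API structs):

package main

import "fmt"

// azureDiskSource is a simplified stand-in for the in-tree AzureDisk source,
// whose ReadOnly and FSType fields are optional pointers.
type azureDiskSource struct {
	ReadOnly *bool
	FSType   *string
}

func main() {
	src := azureDiskSource{} // both fields unset, as a real PV may leave them

	// old pattern: readOnly := *src.ReadOnly  // nil pointer dereference, panics
	var readOnly bool
	if src.ReadOnly != nil { // new pattern: guard before dereferencing
		readOnly = *src.ReadOnly
	}
	fmt.Println(readOnly)
}
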

@@ -18,11 +18,13 @@ package plugins
 
 import (
 	"fmt"
+	"regexp"
 	"strings"
 
 	v1 "k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog"
 )
 
 const (
@@ -32,14 +34,19 @@ const (
 	AzureFileInTreePluginName = "kubernetes.io/azure-file"
 
 	separator        = "#"
-	volumeIDTemplate = "%s#%s#%s"
+	volumeIDTemplate = "%s#%s#%s#%s"
 	// Parameter names defined in azure file CSI driver, refer to
 	// https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
 	azureFileShareName = "shareName"
+
+	secretNameTemplate     = "azure-storage-account-%s-secret"
+	defaultSecretNamespace = "default"
 )
 
 var _ InTreePlugin = &azureFileCSITranslator{}
 
+var secretNameFormatRE = regexp.MustCompile(`azure-storage-account-(.+)-secret`)
+
 // azureFileCSITranslator handles translation of PV spec from In-tree
 // Azure File to CSI Azure File and vice versa
 type azureFileCSITranslator struct{}
@@ -58,32 +65,41 @@ func (t *azureFileCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.St
 // and converts the AzureFile source to a CSIPersistentVolumeSource
 func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
 	if volume == nil || volume.AzureFile == nil {
-		return nil, fmt.Errorf("volume is nil or AWS EBS not defined on volume")
+		return nil, fmt.Errorf("volume is nil or Azure File not defined on volume")
 	}
 
 	azureSource := volume.AzureFile
+	accountName, err := getStorageAccountName(azureSource.SecretName)
+	if err != nil {
+		klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err)
+		accountName = azureSource.SecretName
+	}
 
-	pv := &v1.PersistentVolume{
-		ObjectMeta: metav1.ObjectMeta{
-			// Must be unique per disk as it is used as the unique part of the
-			// staging path
-			Name: fmt.Sprintf("%s-%s", AzureFileDriverName, azureSource.ShareName),
-		},
-		Spec: v1.PersistentVolumeSpec{
-			PersistentVolumeSource: v1.PersistentVolumeSource{
-				CSI: &v1.CSIPersistentVolumeSource{
-					VolumeHandle:     fmt.Sprintf(volumeIDTemplate, "", azureSource.SecretName, azureSource.ShareName),
-					ReadOnly:         azureSource.ReadOnly,
-					VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
-					NodePublishSecretRef: &v1.SecretReference{
-						Name:      azureSource.ShareName,
-						Namespace: "default",
-					},
-				},
-			},
-			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
-		},
-	}
+	var (
+		pv = &v1.PersistentVolume{
+			ObjectMeta: metav1.ObjectMeta{
+				// Must be unique per disk as it is used as the unique part of the
+				// staging path
+				Name: fmt.Sprintf("%s-%s", AzureFileDriverName, azureSource.ShareName),
+			},
+			Spec: v1.PersistentVolumeSpec{
+				PersistentVolumeSource: v1.PersistentVolumeSource{
+					CSI: &v1.CSIPersistentVolumeSource{
+						Driver:           AzureFileDriverName,
+						VolumeHandle:     fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, ""),
+						ReadOnly:         azureSource.ReadOnly,
+						VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
+						NodeStageSecretRef: &v1.SecretReference{
+							Name:      azureSource.SecretName,
+							Namespace: defaultSecretNamespace,
+						},
+					},
+				},
+				AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
+			},
+		}
+	)
+
 	return pv, nil
 }
@@ -95,23 +111,33 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
 	}
 
 	azureSource := pv.Spec.PersistentVolumeSource.AzureFile
+	accountName, err := getStorageAccountName(azureSource.SecretName)
+	if err != nil {
+		klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err)
+		accountName = azureSource.SecretName
+	}
+	volumeID := fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, "")
 
-	volumeID := fmt.Sprintf(volumeIDTemplate, "", azureSource.SecretName, azureSource.ShareName)
-	// refer to https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
-	csiSource := &v1.CSIPersistentVolumeSource{
-		VolumeHandle:     volumeID,
-		ReadOnly:         azureSource.ReadOnly,
-		VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
-	}
-
-	csiSource.NodePublishSecretRef = &v1.SecretReference{
-		Name:      azureSource.ShareName,
-		Namespace: *azureSource.SecretNamespace,
+	var (
+		// refer to https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
+		csiSource = &v1.CSIPersistentVolumeSource{
+			Driver: AzureFileDriverName,
+			NodeStageSecretRef: &v1.SecretReference{
+				Name:      azureSource.SecretName,
+				Namespace: defaultSecretNamespace,
+			},
+			ReadOnly:         azureSource.ReadOnly,
+			VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
+			VolumeHandle:     volumeID,
+		}
+	)
+
+	if azureSource.SecretNamespace != nil {
+		csiSource.NodeStageSecretRef.Namespace = *azureSource.SecretNamespace
 	}
 
 	pv.Spec.PersistentVolumeSource.AzureFile = nil
 	pv.Spec.PersistentVolumeSource.CSI = csiSource
-	pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes)
 
 	return pv, nil
 }
@@ -129,22 +155,21 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
 		ReadOnly: csiSource.ReadOnly,
 	}
 
-	if csiSource.NodePublishSecretRef != nil && csiSource.NodePublishSecretRef.Name != "" {
-		azureSource.SecretName = csiSource.NodePublishSecretRef.Name
-		azureSource.SecretNamespace = &csiSource.NodePublishSecretRef.Namespace
+	if csiSource.NodeStageSecretRef != nil && csiSource.NodeStageSecretRef.Name != "" {
+		azureSource.SecretName = csiSource.NodeStageSecretRef.Name
+		azureSource.SecretNamespace = &csiSource.NodeStageSecretRef.Namespace
 		if csiSource.VolumeAttributes != nil {
 			if shareName, ok := csiSource.VolumeAttributes[azureFileShareName]; ok {
 				azureSource.ShareName = shareName
 			}
 		}
 	} else {
-		_, _, fileShareName, err := getFileShareInfo(csiSource.VolumeHandle)
+		_, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
 		if err != nil {
 			return nil, err
 		}
 		azureSource.ShareName = fileShareName
-		// to-do: for dynamic provision scenario in CSI, it uses cluster's identity to get storage account key
-		// secret for the file share is not created, we may create a serect here
+		azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
 	}
 
 	pv.Spec.CSI = nil
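
Note: this hunk leans on a naming convention rather than stored state: the storage account is stamped into the secret name on the way out and recovered from it on the way back. A round-trip sketch using the constants introduced earlier in the diff:

package main

import (
	"fmt"
	"regexp"
)

const secretNameTemplate = "azure-storage-account-%s-secret"

var secretNameFormatRE = regexp.MustCompile(`azure-storage-account-(.+)-secret`)

func main() {
	secret := fmt.Sprintf(secretNameTemplate, "f5713de20cde511e8ba4900")
	matches := secretNameFormatRE.FindStringSubmatch(secret)
	fmt.Println(secret, "->", matches[1]) // recovers the storage account name
}
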
@@ -182,12 +207,25 @@ func (t *azureFileCSITranslator) RepairVolumeHandle(volumeHandle, nodeID string)
 }
 
 // get file share info according to volume id, e.g.
-// input: "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41"
-// output: rg, f5713de20cde511e8ba4900, pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41
-func getFileShareInfo(id string) (string, string, string, error) {
+// input: "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41#diskname.vhd"
+// output: rg, f5713de20cde511e8ba4900, pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41, diskname.vhd
+func getFileShareInfo(id string) (string, string, string, string, error) {
 	segments := strings.Split(id, separator)
 	if len(segments) < 3 {
-		return "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id)
+		return "", "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id)
 	}
-	return segments[0], segments[1], segments[2], nil
+	var diskName string
+	if len(segments) > 3 {
+		diskName = segments[3]
+	}
+	return segments[0], segments[1], segments[2], diskName, nil
+}
+
+// get storage account name from secret name
+func getStorageAccountName(secretName string) (string, error) {
+	matches := secretNameFormatRE.FindStringSubmatch(secretName)
+	if len(matches) != 2 {
+		return "", fmt.Errorf("could not get account name from %s, correct format: %s", secretName, secretNameFormatRE)
+	}
+	return matches[1], nil
 }
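
Note: the widened volume ID format is easiest to see with the function exercised directly. This sketch inlines getFileShareInfo from the diff (separator is "#") and shows the optional fourth disk-name segment:

package main

import (
	"fmt"
	"strings"
)

// getFileShareInfo, as updated in the diff: the fourth '#'-separated segment
// (a disk name) is optional and returned empty when absent.
func getFileShareInfo(id string) (string, string, string, string, error) {
	segments := strings.Split(id, "#")
	if len(segments) < 3 {
		return "", "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id)
	}
	var diskName string
	if len(segments) > 3 {
		diskName = segments[3]
	}
	return segments[0], segments[1], segments[2], diskName, nil
}

func main() {
	rg, account, share, disk, err := getFileShareInfo("rg#f5713de20cde511e8ba4900#pvc-file-dynamic#diskname.vhd")
	fmt.Println(rg, account, share, disk, err) // rg f5713de20cde511e8ba4900 pvc-file-dynamic diskname.vhd <nil>
}
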

@@ -210,7 +210,7 @@ func (item *primitiveItem) VisitPrimitive(schema *proto.Primitive) {
 		}
 	case proto.Number:
 		switch item.Kind {
-		case proto.Number:
+		case proto.Integer, proto.Number:
 			return
 		}
 	case proto.String:
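
Note: the one-line change encodes the OpenAPI rule that every integer is also a valid number. A trivial sketch of the relaxed check (proto.Integer and proto.Number are kube-openapi kind constants; plain strings stand in for them here):

package main

import "fmt"

// satisfiesNumber mirrors the relaxed switch in the diff: data detected as an
// integer must also validate against a schema declaring "number".
func satisfiesNumber(detectedKind string) bool {
	switch detectedKind {
	case "integer", "number": // previously only "number" was accepted
		return true
	}
	return false
}

func main() {
	fmt.Println(satisfiesNumber("integer"), satisfiesNumber("number"), satisfiesNumber("string"))
}
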

@@ -240,7 +240,7 @@ func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1.
 		return
 	}
 
-	nodeAddresses, err := getNodeAddressesByProviderIDOrName(ctx, instances, node)
+	nodeAddresses, err := getNodeAddressesByProviderIDOrName(ctx, instances, node.Spec.ProviderID, node.Name)
 	if err != nil {
 		klog.Errorf("Error getting node addresses for node %q: %v", node.Name, err)
 		return
@@ -408,10 +408,14 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod
 // All of the returned functions are idempotent, because they are used in a retry-if-conflict
 // loop, meaning they could get called multiple times.
 func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Context, node *v1.Node, instances cloudprovider.Instances) ([]nodeModifier, error) {
-	var nodeModifiers []nodeModifier
+	var (
+		nodeModifiers []nodeModifier
+		providerID    string
+		err           error
+	)
 
 	if node.Spec.ProviderID == "" {
-		providerID, err := cloudprovider.GetInstanceProviderID(ctx, cnc.cloud, types.NodeName(node.Name))
+		providerID, err = cloudprovider.GetInstanceProviderID(ctx, cnc.cloud, types.NodeName(node.Name))
 		if err == nil {
 			nodeModifiers = append(nodeModifiers, func(n *v1.Node) {
 				if n.Spec.ProviderID == "" {
@@ -429,9 +433,11 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co
 			// do not, the taint will be removed, and this will not be retried
 			return nil, err
 		}
+	} else {
+		providerID = node.Spec.ProviderID
 	}
 
-	nodeAddresses, err := getNodeAddressesByProviderIDOrName(ctx, instances, node)
+	nodeAddresses, err := getNodeAddressesByProviderIDOrName(ctx, instances, providerID, node.Name)
 	if err != nil {
 		return nil, err
 	}
@@ -444,7 +450,7 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co
 		}
 	}
 
-	if instanceType, err := getInstanceTypeByProviderIDOrName(ctx, instances, node); err != nil {
+	if instanceType, err := getInstanceTypeByProviderIDOrName(ctx, instances, providerID, node.Name); err != nil {
 		return nil, err
 	} else if instanceType != "" {
 		klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType)
@@ -459,7 +465,7 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co
 	}
 
 	if zones, ok := cnc.cloud.Zones(); ok {
-		zone, err := getZoneByProviderIDOrName(ctx, zones, node)
+		zone, err := getZoneByProviderIDOrName(ctx, zones, providerID, node.Name)
 		if err != nil {
 			return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err)
 		}
@@ -532,11 +538,11 @@ func ensureNodeExistsByProviderID(ctx context.Context, instances cloudprovider.I
 	return instances.InstanceExistsByProviderID(ctx, providerID)
 }
 
-func getNodeAddressesByProviderIDOrName(ctx context.Context, instances cloudprovider.Instances, node *v1.Node) ([]v1.NodeAddress, error) {
-	nodeAddresses, err := instances.NodeAddressesByProviderID(ctx, node.Spec.ProviderID)
+func getNodeAddressesByProviderIDOrName(ctx context.Context, instances cloudprovider.Instances, providerID, nodeName string) ([]v1.NodeAddress, error) {
+	nodeAddresses, err := instances.NodeAddressesByProviderID(ctx, providerID)
 	if err != nil {
 		providerIDErr := err
-		nodeAddresses, err = instances.NodeAddresses(ctx, types.NodeName(node.Name))
+		nodeAddresses, err = instances.NodeAddresses(ctx, types.NodeName(nodeName))
 		if err != nil {
 			return nil, fmt.Errorf("error fetching node by provider ID: %v, and error by node name: %v", providerIDErr, err)
 		}
@@ -577,11 +583,13 @@ func ensureNodeProvidedIPExists(node *v1.Node, nodeAddresses []v1.NodeAddress) (
 	return nodeIP, nodeIPExists
 }
 
-func getInstanceTypeByProviderIDOrName(ctx context.Context, instances cloudprovider.Instances, node *v1.Node) (string, error) {
-	instanceType, err := instances.InstanceTypeByProviderID(ctx, node.Spec.ProviderID)
+// getInstanceTypeByProviderIDOrName will attempt to get the instance type of node using its providerID
+// then it's name. If both attempts fail, an error is returned.
+func getInstanceTypeByProviderIDOrName(ctx context.Context, instances cloudprovider.Instances, providerID, nodeName string) (string, error) {
+	instanceType, err := instances.InstanceTypeByProviderID(ctx, providerID)
 	if err != nil {
 		providerIDErr := err
-		instanceType, err = instances.InstanceType(ctx, types.NodeName(node.Name))
+		instanceType, err = instances.InstanceType(ctx, types.NodeName(nodeName))
 		if err != nil {
 			return "", fmt.Errorf("InstanceType: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
 		}
@@ -590,12 +598,12 @@ func getInstanceTypeByProviderIDOrName(ctx context.Context, instances cloudprovi
 }
 
 // getZoneByProviderIDorName will attempt to get the zone of node using its providerID
-// then it's name. If both attempts fail, an error is returned
-func getZoneByProviderIDOrName(ctx context.Context, zones cloudprovider.Zones, node *v1.Node) (cloudprovider.Zone, error) {
-	zone, err := zones.GetZoneByProviderID(ctx, node.Spec.ProviderID)
+// then it's name. If both attempts fail, an error is returned.
+func getZoneByProviderIDOrName(ctx context.Context, zones cloudprovider.Zones, providerID, nodeName string) (cloudprovider.Zone, error) {
+	zone, err := zones.GetZoneByProviderID(ctx, providerID)
 	if err != nil {
 		providerIDErr := err
-		zone, err = zones.GetZoneByNodeName(ctx, types.NodeName(node.Name))
+		zone, err = zones.GetZoneByNodeName(ctx, types.NodeName(nodeName))
 		if err != nil {
 			return cloudprovider.Zone{}, fmt.Errorf("Zone: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
|
||||||
}
|
}
|
||||||
|
|
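The refactor above threads a single pre-resolved providerID through the address, instance-type, and zone helpers instead of having each one re-read node.Spec.ProviderID. All three now share one shape: try the cheap providerID lookup, fall back to the node-name lookup, and report both errors when both fail. A minimal standalone sketch of that shape, with illustrative function names that are not part of the patch:

package main

import "fmt"

// lookupByProviderIDOrName mirrors the fallback used by getNodeAddressesByProviderIDOrName,
// getInstanceTypeByProviderIDOrName, and getZoneByProviderIDOrName: the providerID path is
// attempted first, and the node-name path only runs when it fails.
func lookupByProviderIDOrName(
	byProviderID func(string) (string, error),
	byName func(string) (string, error),
	providerID, nodeName string,
) (string, error) {
	result, err := byProviderID(providerID)
	if err != nil {
		providerIDErr := err
		result, err = byName(nodeName)
		if err != nil {
			return "", fmt.Errorf("error fetching by provider ID: %v, and error by node name: %v", providerIDErr, err)
		}
	}
	return result, nil
}

func main() {
	zone, err := lookupByProviderIDOrName(
		func(id string) (string, error) { return "", fmt.Errorf("no instance with providerID %q", id) },
		func(name string) (string, error) { return "us-east-1a", nil }, // name lookup succeeds
		"azure:///subscriptions/sub/providers/Microsoft.Compute/virtualMachines/node-0", "node-0",
	)
	fmt.Println(zone, err) // us-east-1a <nil>
}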
vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go (generated, vendored)
@@ -79,6 +79,8 @@ var _ ActualStateOfWorld = &actualStateOfWorld{}
 type PluginInfo struct {
 	SocketPath string
 	Timestamp  time.Time
+	Handler    PluginHandler
+	Name       string
 }

 func (asw *actualStateOfWorld) AddPlugin(pluginInfo PluginInfo) error {
@@ -49,7 +49,7 @@ type OperationExecutor interface {

 	// UnregisterPlugin deregisters the given plugin using a handler in the given plugin handler map.
 	// It then updates the actual state of the world to reflect that.
-	UnregisterPlugin(socketPath string, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorld ActualStateOfWorldUpdater) error
+	UnregisterPlugin(pluginInfo cache.PluginInfo, actualStateOfWorld ActualStateOfWorldUpdater) error
 }

 // NewOperationExecutor returns a new instance of OperationExecutor.
@@ -105,12 +105,11 @@ func (oe *operationExecutor) RegisterPlugin(
 }

 func (oe *operationExecutor) UnregisterPlugin(
-	socketPath string,
-	pluginHandlers map[string]cache.PluginHandler,
+	pluginInfo cache.PluginInfo,
 	actualStateOfWorld ActualStateOfWorldUpdater) error {
 	generatedOperation :=
-		oe.operationGenerator.GenerateUnregisterPluginFunc(socketPath, pluginHandlers, actualStateOfWorld)
+		oe.operationGenerator.GenerateUnregisterPluginFunc(pluginInfo, actualStateOfWorld)

 	return oe.pendingOperations.Run(
-		socketPath, generatedOperation)
+		pluginInfo.SocketPath, generatedOperation)
 }
@@ -67,8 +67,7 @@ type OperationGenerator interface {

 	// Generates the UnregisterPlugin function needed to perform the unregistration of a plugin
 	GenerateUnregisterPluginFunc(
-		socketPath string,
-		pluginHandlers map[string]cache.PluginHandler,
+		pluginInfo cache.PluginInfo,
 		actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error
 }

@@ -115,6 +114,8 @@ func (og *operationGenerator) GenerateRegisterPluginFunc(
 		err = actualStateOfWorldUpdater.AddPlugin(cache.PluginInfo{
 			SocketPath: socketPath,
 			Timestamp:  timestamp,
+			Handler:    handler,
+			Name:       infoResp.Name,
 		})
 		if err != nil {
 			klog.Errorf("RegisterPlugin error -- failed to add plugin at socket %s, err: %v", socketPath, err)
@@ -133,35 +134,20 @@ func (og *operationGenerator) GenerateRegisterPluginFunc(
 }

 func (og *operationGenerator) GenerateUnregisterPluginFunc(
-	socketPath string,
-	pluginHandlers map[string]cache.PluginHandler,
+	pluginInfo cache.PluginInfo,
 	actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error {

 	unregisterPluginFunc := func() error {
-		client, conn, err := dial(socketPath, dialTimeoutDuration)
-		if err != nil {
-			return fmt.Errorf("UnregisterPlugin error -- dial failed at socket %s, err: %v", socketPath, err)
-		}
-		defer conn.Close()
-
-		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-		defer cancel()
-
-		infoResp, err := client.GetInfo(ctx, &registerapi.InfoRequest{})
-		if err != nil {
-			return fmt.Errorf("UnregisterPlugin error -- failed to get plugin info using RPC GetInfo at socket %s, err: %v", socketPath, err)
-		}
-
-		handler, ok := pluginHandlers[infoResp.Type]
-		if !ok {
-			return fmt.Errorf("UnregisterPlugin error -- no handler registered for plugin type: %s at socket %s", infoResp.Type, socketPath)
-		}
+		if pluginInfo.Handler == nil {
+			return fmt.Errorf("UnregisterPlugin error -- failed to get plugin handler for %s", pluginInfo.SocketPath)
+		}

 		// We remove the plugin from the actual state of world cache before calling a plugin consumer's Unregister handle
 		// so that if we receive a register event during Register Plugin, we can process it as a Register call.
-		actualStateOfWorldUpdater.RemovePlugin(socketPath)
+		actualStateOfWorldUpdater.RemovePlugin(pluginInfo.SocketPath)

-		handler.DeRegisterPlugin(infoResp.Name)
+		pluginInfo.Handler.DeRegisterPlugin(pluginInfo.Name)
+
+		klog.V(4).Infof("DeRegisterPlugin called for %s on %v", pluginInfo.Name, pluginInfo.Handler)
 		return nil
 	}
 	return unregisterPluginFunc
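The plugin-manager change above is driven by the two new PluginInfo fields: by caching Handler and Name at registration time, unregistration no longer has to dial the plugin socket and issue a GetInfo RPC, which could not succeed anyway once the plugin has exited and its socket is gone. A reduced sketch of the resulting flow, with stand-in types for the cache package:

package pluginsketch

import "fmt"

// Handler stands in for cache.PluginHandler; only DeRegisterPlugin is needed here.
type Handler interface {
	DeRegisterPlugin(pluginName string)
}

// Info mirrors the extended cache.PluginInfo: the handler and name are captured when
// the plugin registers, so tearing it down later needs no RPC to a possibly dead socket.
type Info struct {
	SocketPath string
	Handler    Handler
	Name       string
}

// Unregister removes the plugin from the cache before notifying the handler, matching
// the ordering in GenerateUnregisterPluginFunc so a concurrent re-register is seen as
// a fresh Register call.
func Unregister(info Info, removeFromCache func(socketPath string)) error {
	if info.Handler == nil {
		return fmt.Errorf("no handler cached for plugin at %s", info.SocketPath)
	}
	removeFromCache(info.SocketPath)
	info.Handler.DeRegisterPlugin(info.Name)
	return nil
}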
@@ -127,7 +127,7 @@ func (rc *reconciler) reconcile() {

 		if unregisterPlugin {
 			klog.V(5).Infof(registeredPlugin.GenerateMsgDetailed("Starting operationExecutor.UnregisterPlugin", ""))
-			err := rc.operationExecutor.UnregisterPlugin(registeredPlugin.SocketPath, rc.getHandlers(), rc.actualStateOfWorld)
+			err := rc.operationExecutor.UnregisterPlugin(registeredPlugin, rc.actualStateOfWorld)
 			if err != nil &&
 				!goroutinemap.IsAlreadyExists(err) &&
 				!exponentialbackoff.IsExponentialBackoff(err) {
@@ -335,6 +335,9 @@ func (sched *Scheduler) skipPodUpdate(pod *v1.Pod) bool {
 		// Annotations must be excluded for the reasons described in
 		// https://github.com/kubernetes/kubernetes/issues/52914.
 		p.Annotations = nil
+		// Same as above, when annotations are modified with ServerSideApply,
+		// ManagedFields may also change and must be excluded
+		p.ManagedFields = nil
 		return p
 	}
 	assumedPodCopy, podCopy := f(assumedPod), f(pod)
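skipPodUpdate treats an update as a no-op by deep-comparing sanitized copies of the assumed and incoming pod; the new lines add ManagedFields to the set of fields that are zeroed out, since server-side apply rewrites them whenever annotations change. A self-contained sketch of that comparison, using a stripped-down pod type in place of *v1.Pod:

package main

import (
	"fmt"
	"reflect"
)

// pod is a reduced stand-in for *v1.Pod with just the fields that matter here.
type pod struct {
	ResourceVersion string
	Annotations     map[string]string
	ManagedFields   []string
	NodeName        string
}

// sameExceptVolatileFields mirrors the comparison in skipPodUpdate: copy both pods,
// zero the fields that may legitimately differ (including ManagedFields, which
// server-side apply rewrites alongside annotations), then deep-compare the rest.
func sameExceptVolatileFields(a, b pod) bool {
	strip := func(p pod) pod {
		p.ResourceVersion = ""
		p.Annotations = nil
		p.ManagedFields = nil
		return p
	}
	return reflect.DeepEqual(strip(a), strip(b))
}

func main() {
	a := pod{ResourceVersion: "1", ManagedFields: []string{"manager-a"}, NodeName: "n1"}
	b := pod{ResourceVersion: "2", ManagedFields: []string{"manager-b"}, NodeName: "n1"}
	fmt.Println(sameExceptVolatileFields(a, b)) // true: only volatile fields differ
}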
@@ -117,6 +117,7 @@ func calculateResourceAllocatableRequest(nodeInfo *schedulernodeinfo.NodeInfo, p

 // calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the
 // PodOverhead feature is enabled, the Overhead is added to the result.
+// podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
 func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
 	var podRequest int64
 	for i := range pod.Spec.Containers {
@@ -125,11 +126,20 @@ func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
 		podRequest += value
 	}

+	for i := range pod.Spec.InitContainers {
+		initContainer := &pod.Spec.InitContainers[i]
+		value := schedutil.GetNonzeroRequestForResource(resource, &initContainer.Resources.Requests)
+		if podRequest < value {
+			podRequest = value
+		}
+	}
+
 	// If Overhead is being utilized, add to the total requests for the pod
 	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
 		if quantity, found := pod.Spec.Overhead[resource]; found {
 			podRequest += quantity.Value()
 		}
 	}

 	return podRequest
 }
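The added loop implements the formula in the new comment: init containers run one at a time, so the pod needs max(sum of app containers, largest init container), plus any pod overhead. For example, two app containers requesting 200m and 300m CPU with an 800m init container and 100m overhead yield max(500, 800) + 100 = 900m. A runnable sketch of the arithmetic, using plain int64 millicores instead of resource.Quantity:

package main

import "fmt"

// requestForPod applies podResourceRequest = max(sum(containers), initContainers) + overhead,
// as in calculatePodResourceRequest above. Values are CPU millicores in this example.
func requestForPod(containers, initContainers []int64, overhead int64) int64 {
	var podRequest int64
	for _, r := range containers {
		podRequest += r
	}
	for _, r := range initContainers {
		if podRequest < r {
			podRequest = r // init containers run serially, so each is compared against the running max
		}
	}
	return podRequest + overhead
}

func main() {
	// Two app containers (200m + 300m), one 800m init container, 100m overhead:
	// max(500, 800) + 100 = 900m.
	fmt.Println(requestForPod([]int64{200, 300}, []int64{800}, 100))
}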
@@ -173,7 +173,10 @@ func (r *Resource) Add(rl v1.ResourceList) {
 		case v1.ResourcePods:
 			r.AllowedPodNumber += int(rQuant.Value())
 		case v1.ResourceEphemeralStorage:
-			r.EphemeralStorage += rQuant.Value()
+			if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+				// if the local storage capacity isolation feature gate is disabled, pods request 0 disk.
+				r.EphemeralStorage += rQuant.Value()
+			}
 		default:
 			if v1helper.IsScalarResourceName(rName) {
 				r.AddScalar(rName, rQuant.Value())
@@ -565,21 +568,32 @@ func (n *NodeInfo) resetSlicesIfEmpty() {
 	}
 }

+// resourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
 func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) {
 	resPtr := &res
 	for _, c := range pod.Spec.Containers {
 		resPtr.Add(c.Resources.Requests)
 		non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&c.Resources.Requests)
 		non0CPU += non0CPUReq
 		non0Mem += non0MemReq
 		// No non-zero resources for GPUs or opaque resources.
 	}

+	for _, ic := range pod.Spec.InitContainers {
+		resPtr.SetMaxResource(ic.Resources.Requests)
+		non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&ic.Resources.Requests)
+		if non0CPU < non0CPUReq {
+			non0CPU = non0CPUReq
+		}
+
+		if non0Mem < non0MemReq {
+			non0Mem = non0MemReq
+		}
+	}
+
 	// If Overhead is being utilized, add to the total requests for the pod
 	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
 		resPtr.Add(pod.Spec.Overhead)

 		if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
 			non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
 		}
@@ -592,6 +592,9 @@ func (sched *Scheduler) scheduleOne(ctx context.Context) {
 			// succeeds, the pod should get counted as a success the next time we try to
 			// schedule it. (hopefully)
 			metrics.PodScheduleFailures.Inc()
+		} else if err == core.ErrNoNodesAvailable {
+			// No nodes available is counted as unschedulable rather than an error.
+			metrics.PodScheduleFailures.Inc()
 		} else {
 			klog.Errorf("error selecting node for pod: %v", err)
 			metrics.PodScheduleErrors.Inc()
@@ -40,10 +40,12 @@ go_library(
     deps = [
         "//pkg/api/v1/pod:go_default_library",
        "//pkg/apis/core/v1/helper:go_default_library",
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
@@ -18,7 +18,9 @@ package util

 import (
 	v1 "k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+	"k8s.io/kubernetes/pkg/features"
 )

 // For each of these resources, a pod that doesn't request the resource explicitly
@@ -60,6 +62,11 @@ func GetNonzeroRequestForResource(resource v1.ResourceName, requests *v1.Resourc
 		}
 		return requests.Memory().Value()
 	case v1.ResourceEphemeralStorage:
+		// if the local storage capacity isolation feature gate is disabled, pods request 0 disk.
+		if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+			return 0
+		}
+
 		quantity, found := (*requests)[v1.ResourceEphemeralStorage]
 		if !found {
 			return 0
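Both this hunk and the Resource.Add change earlier gate ephemeral-storage accounting on LocalStorageCapacityIsolation, so with the gate off every pod is treated as requesting zero disk. A toy sketch of the guard, with the feature gate reduced to a plain bool rather than the real utilfeature API:

package main

import "fmt"

// nonzeroEphemeralRequest returns the pod's ephemeral-storage request, or 0 when the
// (simulated) LocalStorageCapacityIsolation gate is off, mirroring the guards added
// in GetNonzeroRequestForResource and Resource.Add above.
func nonzeroEphemeralRequest(gateEnabled bool, requested int64) int64 {
	if !gateEnabled {
		return 0 // gate off: pods request no disk, so nothing is accounted
	}
	return requested
}

func main() {
	fmt.Println(nonzeroEphemeralRequest(false, 1<<30)) // 0
	fmt.Println(nonzeroEphemeralRequest(true, 1<<30))  // 1073741824
}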
@@ -17,10 +17,10 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
     ] + select({
         "@io_bazel_rules_go//go/platform:android": [
-            "//vendor/github.com/docker/libnetwork/ipvs:go_default_library",
+            "//third_party/forked/ipvs:go_default_library",
         ],
         "@io_bazel_rules_go//go/platform:linux": [
-            "//vendor/github.com/docker/libnetwork/ipvs:go_default_library",
+            "//third_party/forked/ipvs:go_default_library",
         ],
         "//conditions:default": [],
     }),
@@ -39,7 +39,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
     ] + select({
         "@io_bazel_rules_go//go/platform:android": [
-            "//vendor/github.com/docker/libnetwork/ipvs:go_default_library",
+            "//third_party/forked/ipvs:go_default_library",
             "//vendor/k8s.io/klog:go_default_library",
             "//vendor/k8s.io/utils/exec:go_default_library",
         ],
@@ -56,7 +56,7 @@ go_library(
             "//vendor/k8s.io/utils/exec:go_default_library",
         ],
         "@io_bazel_rules_go//go/platform:linux": [
-            "//vendor/github.com/docker/libnetwork/ipvs:go_default_library",
+            "//third_party/forked/ipvs:go_default_library",
             "//vendor/k8s.io/klog:go_default_library",
             "//vendor/k8s.io/utils/exec:go_default_library",
         ],
@@ -27,8 +27,8 @@ import (
 	"syscall"
 	"time"

-	libipvs "github.com/docker/libnetwork/ipvs"
 	"k8s.io/klog"
+	libipvs "k8s.io/kubernetes/third_party/forked/ipvs"
 	utilexec "k8s.io/utils/exec"
 )
@@ -17,6 +17,7 @@ limitations under the License.
 package csi

 import (
+	"context"
 	"errors"
 	"fmt"
 	"os"
@@ -24,8 +25,6 @@ import (
 	"strings"
 	"time"

-	"context"
-
 	"k8s.io/klog"

 	api "k8s.io/api/core/v1"
@@ -227,7 +226,7 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {

 	if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) &&
 		utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) {
-		// This function prevents Kubelet from posting Ready status until CSINodeInfo
+		// This function prevents Kubelet from posting Ready status until CSINode
 		// is both installed and initialized
 		if err := initializeCSINode(host, localNim); err != nil {
 			return errors.New(log("failed to initialize CSINodeInfo: %v", err))
@@ -244,21 +243,28 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
 func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) error {
 	kvh, ok := host.(volume.KubeletVolumeHost)
 	if !ok {
-		klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINodeInfo initialization, not running on kubelet")
+		klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINode initialization, not running on kubelet")
 		return nil
 	}
 	kubeClient := host.GetKubeClient()
 	if kubeClient == nil {
-		// Kubelet running in standalone mode. Skip CSINodeInfo initialization
-		klog.Warning("Skipping CSINodeInfo initialization, kubelet running in standalone mode")
+		// Kubelet running in standalone mode. Skip CSINode initialization
+		klog.Warning("Skipping CSINode initialization, kubelet running in standalone mode")
 		return nil
 	}

-	kvh.SetKubeletError(errors.New("CSINodeInfo is not yet initialized"))
+	kvh.SetKubeletError(errors.New("CSINode is not yet initialized"))

 	go func() {
 		defer utilruntime.HandleCrash()

+		// First wait indefinitely to talk to Kube APIServer
+		nodeName := host.GetNodeName()
+		err := waitForAPIServerForever(kubeClient, nodeName)
+		if err != nil {
+			klog.Fatalf("Failed to initialize CSINode while waiting for API server to report ok: %v", err)
+		}
+
 		// Backoff parameters tuned to retry over 140 seconds. Will fail and restart the Kubelet
 		// after max retry steps.
 		initBackoff := wait.Backoff{
@@ -267,12 +273,12 @@ func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) er
 			Factor:   6.0,
 			Jitter:   0.1,
 		}
-		err := wait.ExponentialBackoff(initBackoff, func() (bool, error) {
-			klog.V(4).Infof("Initializing migrated drivers on CSINodeInfo")
+		err = wait.ExponentialBackoff(initBackoff, func() (bool, error) {
+			klog.V(4).Infof("Initializing migrated drivers on CSINode")
 			err := nim.InitializeCSINodeWithAnnotation()
 			if err != nil {
-				kvh.SetKubeletError(fmt.Errorf("Failed to initialize CSINodeInfo: %v", err))
-				klog.Errorf("Failed to initialize CSINodeInfo: %v", err)
+				kvh.SetKubeletError(fmt.Errorf("Failed to initialize CSINode: %v", err))
+				klog.Errorf("Failed to initialize CSINode: %v", err)
 				return false, nil
 			}

@@ -286,7 +292,7 @@ func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) er
 			// using CSI for all Migrated volume plugins. Then all the CSINode initialization
 			// code can be dropped from Kubelet.
 			// Kill the Kubelet process and allow it to restart to retry initialization
-			klog.Fatalf("Failed to initialize CSINodeInfo after retrying")
+			klog.Fatalf("Failed to initialize CSINode after retrying: %v", err)
 		}
 	}()
 	return nil
@@ -918,3 +924,28 @@ func highestSupportedVersion(versions []string) (*utilversion.Version, error) {
 	}
 	return highestSupportedVersion, nil
 }
+
+// waitForAPIServerForever waits forever to get a CSINode instance as a proxy
+// for a healthy APIServer
+func waitForAPIServerForever(client clientset.Interface, nodeName types.NodeName) error {
+	var lastErr error
+	err := wait.PollImmediateInfinite(time.Second, func() (bool, error) {
+		// Get a CSINode from API server to make sure 1) kubelet can reach API server
+		// and 2) it has enough permissions. Kubelet may have restricted permissions
+		// when it's bootstrapping TLS.
+		// https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/
+		_, lastErr = client.StorageV1().CSINodes().Get(context.TODO(), string(nodeName), meta.GetOptions{})
+		if lastErr == nil || apierrors.IsNotFound(lastErr) {
+			// API server contacted
+			return true, nil
+		}
+		klog.V(2).Infof("Failed to contact API server when waiting for CSINode publishing: %s", lastErr)
+		return false, nil
+	})
+	if err != nil {
+		// In theory this is unreachable, but just in case:
+		return fmt.Errorf("%v: %v", err, lastErr)
+	}
+
+	return nil
+}
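The new waitForAPIServerForever helper probes the API server once per second, forever, and treats a NotFound as success: the point is proving connectivity and authorization, not the object's existence. A standalone sketch of that poll-forever shape with an injectable probe, using a plain loop instead of wait.PollImmediateInfinite:

package main

import (
	"errors"
	"fmt"
	"time"
)

// errNotFound simulates apierrors.IsNotFound: a "real" answer from the server,
// which proves connectivity even though the object does not exist yet.
var errNotFound = errors.New("not found")

// waitForever mirrors the wait.PollImmediateInfinite usage above: probe once per
// interval, treat nil or not-found as success, log and retry on anything else.
func waitForever(interval time.Duration, probe func() error) {
	for {
		err := probe()
		if err == nil || errors.Is(err, errNotFound) {
			return // API server contacted
		}
		fmt.Printf("probe failed, retrying: %v\n", err)
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	waitForever(10*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("connection refused")
		}
		return errNotFound // server reachable; object simply absent
	})
	fmt.Println("API server is up after", attempts, "attempts")
}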
@@ -397,16 +397,16 @@ func (nim *nodeInfoManager) InitializeCSINodeWithAnnotation() error {
 		return goerrors.New("error getting CSI client")
 	}

-	var updateErrs []error
+	var lastErr error
 	err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
-		if err := nim.tryInitializeCSINodeWithAnnotation(csiKubeClient); err != nil {
-			updateErrs = append(updateErrs, err)
+		if lastErr = nim.tryInitializeCSINodeWithAnnotation(csiKubeClient); lastErr != nil {
+			klog.V(2).Infof("Failed to publish CSINode: %v", lastErr)
 			return false, nil
 		}
 		return true, nil
 	})
 	if err != nil {
-		return fmt.Errorf("error updating CSINode annotation: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
+		return fmt.Errorf("error updating CSINode annotation: %v; caused by: %v", err, lastErr)
 	}

 	return nil
@@ -0,0 +1,49 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+licenses(["notice"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "constants.go",
+        "ipvs.go",
+        "netlink.go",
+    ],
+    importpath = "k8s.io/kubernetes/third_party/forked/ipvs",
+    visibility = ["//visibility:public"],
+    deps = select({
+        "@io_bazel_rules_go//go/platform:android": [
+            "//vendor/github.com/sirupsen/logrus:go_default_library",
+            "//vendor/github.com/vishvananda/netlink/nl:go_default_library",
+            "//vendor/github.com/vishvananda/netns:go_default_library",
+            "//vendor/golang.org/x/sys/unix:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:linux": [
+            "//vendor/github.com/sirupsen/logrus:go_default_library",
+            "//vendor/github.com/vishvananda/netlink/nl:go_default_library",
+            "//vendor/github.com/vishvananda/netns:go_default_library",
+            "//vendor/golang.org/x/sys/unix:go_default_library",
+        ],
+        "//conditions:default": [],
+    }),
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["netlink_test.go"],
+    embed = [":go_default_library"],
+)
@@ -1,5 +1,8 @@
 // +build linux

+// Code and documentation copyright 2015 Docker, inc.
+// Code released under the Apache 2.0 license. Docs released under Creative commons.
+
 package ipvs

 const (
@@ -144,6 +147,17 @@ const (
 	// a statically assigned hash table by their source IP
 	// addresses.
 	SourceHashing = "sh"
+
+	// WeightedRoundRobin assigns jobs to real servers proportionally
+	// to the real servers' weight. Servers with higher weights
+	// receive new jobs first and get more jobs than servers
+	// with lower weights. Servers with equal weights get
+	// an equal distribution of new jobs
+	WeightedRoundRobin = "wrr"
+
+	// WeightedLeastConnection assigns more jobs to servers
+	// with fewer jobs, relative to the real servers' weight
+	WeightedLeastConnection = "wlc"
 )

 const (
@@ -1,5 +1,8 @@
 // +build linux

+// Code and documentation copyright 2015 Docker, inc.
+// Code released under the Apache 2.0 license. Docs released under Creative commons.
+
 package ipvs

 import (
@@ -1,10 +1,14 @@
 // +build linux

+// Code and documentation copyright 2015 Docker, inc.
+// Code released under the Apache 2.0 license. Docs released under Creative commons.
+
 package ipvs

 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"net"
 	"os/exec"
@@ -315,6 +319,7 @@ func assembleStats(msg []byte) (SvcStats, error) {
 func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) {

 	var s Service
+	var addressBytes []byte

 	for _, attr := range attrs {

@@ -327,11 +332,7 @@ func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) {
 		case ipvsSvcAttrProtocol:
 			s.Protocol = native.Uint16(attr.Value)
 		case ipvsSvcAttrAddress:
-			ip, err := parseIP(attr.Value, s.AddressFamily)
-			if err != nil {
-				return nil, err
-			}
-			s.Address = ip
+			addressBytes = attr.Value
 		case ipvsSvcAttrPort:
 			s.Port = binary.BigEndian.Uint16(attr.Value)
 		case ipvsSvcAttrFWMark:
@@ -353,6 +354,16 @@ func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) {
 		}

 	}

+	// parse Address after parsing AddressFamily, in case parseIP errors
+	if addressBytes != nil {
+		ip, err := parseIP(addressBytes, s.AddressFamily)
+		if err != nil {
+			return nil, err
+		}
+		s.Address = ip
+	}
+
 	return &s, nil
 }
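The reordering in assembleService matters because netlink does not guarantee attribute order: the raw address bytes may arrive before the address family, and parseIP needs the family to know how many bytes to read. Stashing the bytes and parsing after the loop fixes that. A small sketch of the family-dependent decode, with simplified AF constants standing in for the syscall values:

package main

import (
	"fmt"
	"net"
)

// Simplified stand-ins for syscall.AF_INET and syscall.AF_INET6.
const (
	afInet  = 2
	afInet6 = 10
)

// parseAddr mimics the deferred decode: the raw bytes are only interpreted
// once the address family is known.
func parseAddr(family int, raw []byte) (net.IP, error) {
	switch family {
	case afInet:
		return net.IP(raw[:4]), nil // netlink packs IPv4 into the first 4 bytes
	case afInet6:
		return net.IP(raw[:16]), nil
	default:
		return nil, fmt.Errorf("unknown address family %d", family)
	}
}

func main() {
	raw := make([]byte, 16)
	copy(raw, []byte{10, 0, 0, 1}) // 10.0.0.1 with trailing zeros, as netlink returns it
	ip, _ := parseAddr(afInet, raw)
	fmt.Println(ip) // 10.0.0.1
}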
@@ -416,18 +427,18 @@ func (i *Handle) doCmdWithoutAttr(cmd uint8) ([][]byte, error) {
 func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) {

 	var d Destination
+	var addressBytes []byte

 	for _, attr := range attrs {

 		attrType := int(attr.Attr.Type)

 		switch attrType {

+		case ipvsDestAttrAddressFamily:
+			d.AddressFamily = native.Uint16(attr.Value)
 		case ipvsDestAttrAddress:
-			ip, err := parseIP(attr.Value, syscall.AF_INET)
-			if err != nil {
-				return nil, err
-			}
-			d.Address = ip
+			addressBytes = attr.Value
 		case ipvsDestAttrPort:
 			d.Port = binary.BigEndian.Uint16(attr.Value)
 		case ipvsDestAttrForwardingMethod:
@@ -438,8 +449,6 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error)
 			d.UpperThreshold = native.Uint32(attr.Value)
 		case ipvsDestAttrLowerThreshold:
 			d.LowerThreshold = native.Uint32(attr.Value)
-		case ipvsDestAttrAddressFamily:
-			d.AddressFamily = native.Uint16(attr.Value)
 		case ipvsDestAttrActiveConnections:
 			d.ActiveConnections = int(native.Uint16(attr.Value))
 		case ipvsDestAttrInactiveConnections:
@@ -452,9 +461,63 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error)
 			d.Stats = DstStats(stats)
 		}
 	}

+	// in older kernels (< 3.18), the destination address family attribute doesn't exist so we must
+	// assume it based on the destination address provided.
+	if d.AddressFamily == 0 {
+		// we can't check the address family using the net stdlib because netlink returns
+		// IPv4 addresses as the first 4 bytes in a []byte of length 16, whereas the
+		// stdlib expects it as the last 4 bytes.
+		addressFamily, err := getIPFamily(addressBytes)
+		if err != nil {
+			return nil, err
+		}
+		d.AddressFamily = addressFamily
+	}
+
+	// parse Address after parsing AddressFamily, in case parseIP errors
+	if addressBytes != nil {
+		ip, err := parseIP(addressBytes, d.AddressFamily)
+		if err != nil {
+			return nil, err
+		}
+		d.Address = ip
+	}
+
 	return &d, nil
 }

+// getIPFamily parses the IP family based on raw data from netlink.
+// For AF_INET, netlink will set the first 4 bytes with trailing zeros:
+// 10.0.0.1 -> [10 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]
+// For AF_INET6, the full 16 byte array is used:
+// 2001:db8:3c4d:15::1a00 -> [32 1 13 184 60 77 0 21 0 0 0 0 0 0 26 0]
+func getIPFamily(address []byte) (uint16, error) {
+	if len(address) == 4 {
+		return syscall.AF_INET, nil
+	}
+
+	if isZeros(address) {
+		return 0, errors.New("could not parse IP family from address data")
+	}
+
+	// assume IPv4 if the first 4 bytes are non-zero but the rest of the data is trailing zeros
+	if !isZeros(address[:4]) && isZeros(address[4:]) {
+		return syscall.AF_INET, nil
+	}
+
+	return syscall.AF_INET6, nil
+}
+
+func isZeros(b []byte) bool {
+	for i := 0; i < len(b); i++ {
+		if b[i] != 0 {
+			return false
+		}
+	}
+	return true
+}
+
 // parseDestination given an ipvs netlink response this function will respond with a valid destination entry, an error otherwise
 func (i *Handle) parseDestination(msg []byte) (*Destination, error) {
 	var dst *Destination
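getIPFamily's heuristic follows directly from the byte layouts quoted in its comment. A runnable sketch of the same logic with the two example addresses from the comment:

package main

import (
	"fmt"
	"syscall"
)

func isZeros(b []byte) bool {
	for _, v := range b {
		if v != 0 {
			return false
		}
	}
	return true
}

// guessIPFamily applies the same heuristic as getIPFamily above, shown standalone:
// a 16-byte buffer whose last 12 bytes are zero is treated as a packed IPv4 address.
func guessIPFamily(address []byte) (uint16, error) {
	if len(address) == 4 {
		return syscall.AF_INET, nil
	}
	if isZeros(address) {
		return 0, fmt.Errorf("could not parse IP family from address data")
	}
	if !isZeros(address[:4]) && isZeros(address[4:]) {
		return syscall.AF_INET, nil
	}
	return syscall.AF_INET6, nil
}

func main() {
	v4 := []byte{10, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}           // 10.0.0.1
	v6 := []byte{32, 1, 13, 184, 60, 77, 0, 21, 0, 0, 0, 0, 0, 0, 26, 0}    // 2001:db8:3c4d:15::1a00
	f4, _ := guessIPFamily(v4)
	f6, _ := guessIPFamily(v6)
	fmt.Println(f4 == syscall.AF_INET, f6 == syscall.AF_INET6) // true true
}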
@@ -121,6 +121,11 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 	diskEncryptionSetID := ""
 	writeAcceleratorEnabled := false

+	vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		return -1, err
+	}
+
 	if isManagedDisk {
 		diskName := path.Base(diskURI)
 		resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
@@ -140,9 +145,12 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 			attachErr := fmt.Sprintf(
 				"disk(%s) already attached to node(%s), could not be attached to node(%s)",
 				diskURI, *disk.ManagedBy, nodeName)
-			attachedNode := path.Base(*disk.ManagedBy)
+			attachedNode, err := vmset.GetNodeNameByProviderID(*disk.ManagedBy)
+			if err != nil {
+				return -1, err
+			}
 			klog.V(2).Infof("found dangling volume %s attached to node %s", diskURI, attachedNode)
-			danglingErr := volerr.NewDanglingError(attachErr, types.NodeName(attachedNode), "")
+			danglingErr := volerr.NewDanglingError(attachErr, attachedNode, "")
 			return -1, danglingErr
 		}

@@ -157,11 +165,6 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 		}
 	}

-	vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
-	if err != nil {
-		return -1, err
-	}
-
 	instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
 	if err != nil {
 		klog.Warningf("failed to get azure instance id (%v) for node %s", err, nodeName)
@@ -407,7 +407,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
 		if pipID == nil {
 			return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress ID is Nil", serviceName, *lb.Name)
 		}
-		pipName, err := getLastSegment(*pipID)
+		pipName, err := getLastSegment(*pipID, "/")
 		if err != nil {
 			return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, *pipID)
 		}
@@ -72,7 +72,7 @@ const (
 )

 var errNotInVMSet = errors.New("vm is not in the vmset")
-var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
+var providerIDRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
 var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
 var nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)
@@ -171,8 +171,8 @@ func isMasterNode(node *v1.Node) bool {
 }

 // returns the deepest child's identifier from a full identifier string.
-func getLastSegment(ID string) (string, error) {
-	parts := strings.Split(ID, "/")
+func getLastSegment(ID, separator string) (string, error) {
+	parts := strings.Split(ID, separator)
 	name := parts[len(parts)-1]
 	if len(name) == 0 {
 		return "", fmt.Errorf("resource name was missing from identifier")
@@ -519,7 +519,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error)
 	publicIP := ""
 	if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
 		pipID := *ipConfig.PublicIPAddress.ID
-		pipName, err := getLastSegment(pipID)
+		pipName, err := getLastSegment(pipID, "/")
 		if err != nil {
 			return "", "", fmt.Errorf("failed to publicIP name for node %q with pipID %q", name, pipID)
 		}
@@ -589,7 +589,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
 			// already added in the list
 			continue
 		}
-		asName, err := getLastSegment(asID)
+		asName, err := getLastSegment(asID, "/")
 		if err != nil {
 			klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
 			return nil, err
@@ -680,7 +680,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
 	if err != nil {
 		return network.Interface{}, err
 	}
-	nicName, err := getLastSegment(primaryNicID)
+	nicName, err := getLastSegment(primaryNicID, "/")
 	if err != nil {
 		return network.Interface{}, err
 	}
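The extra separator parameter is what lets getLastSegment serve both jobs above: splitting ARM resource IDs on "/" and splitting vmss instance IDs such as k8s-agentpool-36841236-vmss_1 on "_". A self-contained copy of the changed helper with both uses:

package main

import (
	"fmt"
	"strings"
)

// getLastSegment, as changed above: return the substring after the final separator,
// erroring when the ID ends in the separator (or is empty).
func getLastSegment(ID, separator string) (string, error) {
	parts := strings.Split(ID, separator)
	name := parts[len(parts)-1]
	if len(name) == 0 {
		return "", fmt.Errorf("resource name was missing from identifier")
	}
	return name, nil
}

func main() {
	nic, _ := getLastSegment("/subscriptions/s/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic-0", "/")
	inst, _ := getLastSegment("k8s-agentpool-36841236-vmss_1", "_")
	fmt.Println(nic, inst) // nic-0 1
}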
@@ -189,6 +189,16 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (

 // GetPowerStatusByNodeName returns the power state of the specified node.
 func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+		return "", err
+	}
+	if managedByAS {
+		// vm is managed by availability set.
+		return ss.availabilitySet.GetPowerStatusByNodeName(name)
+	}
+
 	_, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault)
 	if err != nil {
 		return powerState, err
@@ -285,6 +295,11 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
 }

 // GetNodeNameByProviderID gets the node name by provider ID.
+// providerID example:
+// 1. vmas providerID: azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-27053986-0
+// 2. vmss providerID:
+// azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/1
+// /subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/k8s-agentpool-36841236-vmss_1
 func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
 	// NodeName is not part of providerID for vmss instances.
 	scaleSetName, err := extractScaleSetNameByProviderID(providerID)
@@ -298,12 +313,20 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
 		return "", fmt.Errorf("error extracting resource group for node %q", providerID)
 	}

-	instanceID, err := getLastSegment(providerID)
+	instanceID, err := getLastSegment(providerID, "/")
 	if err != nil {
 		klog.V(4).Infof("Cannot extract instanceID from providerID (%s), assuming it is managed by availability set: %v", providerID, err)
 		return ss.availabilitySet.GetNodeNameByProviderID(providerID)
 	}

+	// instanceID contains scaleSetName (returned by disk.ManagedBy), e.g. k8s-agentpool-36841236-vmss_1
+	if strings.HasPrefix(strings.ToLower(instanceID), strings.ToLower(scaleSetName)) {
+		instanceID, err = getLastSegment(instanceID, "_")
+		if err != nil {
+			return "", err
+		}
+	}
+
 	vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, azcache.CacheReadTypeUnsafe)
 	if err != nil {
 		return "", err
@@ -695,7 +718,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
 		return network.Interface{}, err
 	}

-	nicName, err := getLastSegment(primaryInterfaceID)
+	nicName, err := getLastSegment(primaryInterfaceID, "/")
 	if err != nil {
 		klog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
 		return network.Interface{}, err
@@ -415,8 +415,6 @@ github.com/docker/go-events
 github.com/docker/go-metrics
 # github.com/docker/go-units v0.4.0
 github.com/docker/go-units
-# github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34
-github.com/docker/libnetwork/ipvs
 # github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
 github.com/docker/spdystream
 github.com/docker/spdystream/spdy
@@ -1142,7 +1140,7 @@ gopkg.in/square/go-jose.v2/jwt
 gopkg.in/warnings.v0
 # gopkg.in/yaml.v2 v2.2.8
 gopkg.in/yaml.v2
-# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1
+# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
 k8s.io/api/admissionregistration/v1
@@ -1186,7 +1184,7 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1
+# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@@ -1226,7 +1224,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
 k8s.io/apiextensions-apiserver/pkg/registry/customresource
 k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
 k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
-# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1
+# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta
@@ -1288,7 +1286,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1
+# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
 k8s.io/apiserver/pkg/admission/initializer
@@ -1418,7 +1416,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
 k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1
+# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/kustomize
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@@ -1431,7 +1429,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/cached
 k8s.io/client-go/discovery/cached/disk
@@ -1665,7 +1663,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1
+# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
 k8s.io/cloud-provider/node/helpers
@@ -1673,13 +1671,13 @@ k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1
+# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -1694,7 +1692,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1
+# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1
|
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1
|
||||||
k8s.io/component-base/cli/flag
|
k8s.io/component-base/cli/flag
|
||||||
k8s.io/component-base/cli/globalflag
|
k8s.io/component-base/cli/globalflag
|
||||||
k8s.io/component-base/codec
|
k8s.io/component-base/codec
|
||||||
|
@ -1712,10 +1710,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
|
||||||
k8s.io/component-base/metrics/testutil
|
k8s.io/component-base/metrics/testutil
|
||||||
k8s.io/component-base/version
|
k8s.io/component-base/version
|
||||||
k8s.io/component-base/version/verflag
|
k8s.io/component-base/version/verflag
|
||||||
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1
|
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1
|
||||||
k8s.io/cri-api/pkg/apis
|
k8s.io/cri-api/pkg/apis
|
||||||
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
|
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
|
||||||
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1
|
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1
|
||||||
k8s.io/csi-translation-lib
|
k8s.io/csi-translation-lib
|
||||||
k8s.io/csi-translation-lib/plugins
|
k8s.io/csi-translation-lib/plugins
|
||||||
# k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
|
# k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
|
||||||
|
@ -1730,7 +1728,7 @@ k8s.io/gengo/types
|
||||||
k8s.io/heapster/metrics/api/v1/types
|
k8s.io/heapster/metrics/api/v1/types
|
||||||
# k8s.io/klog v1.0.0
|
# k8s.io/klog v1.0.0
|
||||||
k8s.io/klog
|
k8s.io/klog
|
||||||
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1
|
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1
|
||||||
k8s.io/kube-aggregator/pkg/apis/apiregistration
|
k8s.io/kube-aggregator/pkg/apis/apiregistration
|
||||||
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
|
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
|
||||||
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
|
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
|
||||||
|
@ -1758,9 +1756,9 @@ k8s.io/kube-aggregator/pkg/controllers/status
|
||||||
k8s.io/kube-aggregator/pkg/registry/apiservice
|
k8s.io/kube-aggregator/pkg/registry/apiservice
|
||||||
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
|
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
|
||||||
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
|
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
|
||||||
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1
|
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1
|
||||||
k8s.io/kube-controller-manager/config/v1alpha1
|
k8s.io/kube-controller-manager/config/v1alpha1
|
||||||
# k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
|
# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
|
||||||
k8s.io/kube-openapi/pkg/aggregator
|
k8s.io/kube-openapi/pkg/aggregator
|
||||||
k8s.io/kube-openapi/pkg/builder
|
k8s.io/kube-openapi/pkg/builder
|
||||||
k8s.io/kube-openapi/pkg/common
|
k8s.io/kube-openapi/pkg/common
|
||||||
|
@ -1769,14 +1767,14 @@ k8s.io/kube-openapi/pkg/schemaconv
|
||||||
k8s.io/kube-openapi/pkg/util
|
k8s.io/kube-openapi/pkg/util
|
||||||
k8s.io/kube-openapi/pkg/util/proto
|
k8s.io/kube-openapi/pkg/util/proto
|
||||||
k8s.io/kube-openapi/pkg/util/proto/validation
|
k8s.io/kube-openapi/pkg/util/proto/validation
|
||||||
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1
|
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1
|
||||||
k8s.io/kube-proxy/config/v1alpha1
|
k8s.io/kube-proxy/config/v1alpha1
|
||||||
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1
|
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1
|
||||||
k8s.io/kube-scheduler/config/v1
|
k8s.io/kube-scheduler/config/v1
|
||||||
k8s.io/kube-scheduler/config/v1alpha1
|
k8s.io/kube-scheduler/config/v1alpha1
|
||||||
k8s.io/kube-scheduler/config/v1alpha2
|
k8s.io/kube-scheduler/config/v1alpha2
|
||||||
k8s.io/kube-scheduler/extender/v1
|
k8s.io/kube-scheduler/extender/v1
|
||||||
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1
|
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1
|
||||||
k8s.io/kubectl/pkg/apps
|
k8s.io/kubectl/pkg/apps
|
||||||
k8s.io/kubectl/pkg/cmd
|
k8s.io/kubectl/pkg/cmd
|
||||||
k8s.io/kubectl/pkg/cmd/annotate
|
k8s.io/kubectl/pkg/cmd/annotate
|
||||||
|
@ -1851,11 +1849,11 @@ k8s.io/kubectl/pkg/util/storage
|
||||||
k8s.io/kubectl/pkg/util/templates
|
k8s.io/kubectl/pkg/util/templates
|
||||||
k8s.io/kubectl/pkg/util/term
|
k8s.io/kubectl/pkg/util/term
|
||||||
k8s.io/kubectl/pkg/validation
|
k8s.io/kubectl/pkg/validation
|
||||||
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1
|
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1
|
||||||
k8s.io/kubelet/config/v1beta1
|
k8s.io/kubelet/config/v1beta1
|
||||||
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
|
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
|
||||||
k8s.io/kubelet/pkg/apis/pluginregistration/v1
|
k8s.io/kubelet/pkg/apis/pluginregistration/v1
|
||||||
# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.2-k3s.1
|
# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.3-k3s1
|
||||||
k8s.io/kubernetes/cmd/cloud-controller-manager/app
|
k8s.io/kubernetes/cmd/cloud-controller-manager/app
|
||||||
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
|
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
|
||||||
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
|
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
|
||||||
|
@ -2598,7 +2596,8 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
|
||||||
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
|
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
|
||||||
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
|
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
|
||||||
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
|
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
|
||||||
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1
|
k8s.io/kubernetes/third_party/forked/ipvs
|
||||||
|
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1
|
||||||
k8s.io/legacy-cloud-providers/aws
|
k8s.io/legacy-cloud-providers/aws
|
||||||
k8s.io/legacy-cloud-providers/azure
|
k8s.io/legacy-cloud-providers/azure
|
||||||
k8s.io/legacy-cloud-providers/azure/auth
|
k8s.io/legacy-cloud-providers/azure/auth
|
||||||
|
@ -2629,7 +2628,7 @@ k8s.io/legacy-cloud-providers/openstack
|
||||||
k8s.io/legacy-cloud-providers/vsphere
|
k8s.io/legacy-cloud-providers/vsphere
|
||||||
k8s.io/legacy-cloud-providers/vsphere/vclib
|
k8s.io/legacy-cloud-providers/vsphere/vclib
|
||||||
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
|
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
|
||||||
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1
|
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1
|
||||||
k8s.io/metrics/pkg/apis/custom_metrics
|
k8s.io/metrics/pkg/apis/custom_metrics
|
||||||
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
|
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
|
||||||
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
|
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
|
||||||
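For readers skimming the diff: each `# module version => replacement version` header in vendor/modules.txt, such as the apiextensions-apiserver line above, is machine-generated. `go mod vendor` derives it from the corresponding replace directive in go.mod, so a version bump like this one starts as a go.mod edit and the vendor/modules.txt changes follow mechanically. A minimal sketch of the workflow (go.mod excerpt only, using a module path and version taken from the diff above; not the full file):

// go.mod (excerpt): substitute the k3s fork of a Kubernetes staging
// module for the upstream one, pinned at the forked tag.
replace k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1

// After editing go.mod, regenerate the vendor tree; this rewrites
// vendor/modules.txt, producing the -/+ lines seen in this diff:
//   go mod vendor

vendor/modules.txt should never be edited by hand; if it disagrees with go.mod, the Go toolchain refuses to build in vendor mode.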