mirror of https://github.com/k3s-io/k3s
commit 4c08a74b8b

go.mod
@@ -31,31 +31,31 @@ replace (
 	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.0-k3s.1
-	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.0-k3s.1
-	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.0-k3s.1
-	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.0-k3s.1
-	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.0-k3s.1
-	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.0-k3s.1
-	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.0-k3s.1
-	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.0-k3s.1
-	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.0-k3s.1
-	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.0-k3s.1
-	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.0-k3s.1
-	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.0-k3s.1
-	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.0-k3s.1
-	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.0-k3s.1
-	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.0-k3s.1
-	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.0-k3s.1
-	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.0-k3s.1
-	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.0-k3s.1
-	k8s.io/kubernetes => github.com/rancher/kubernetes v1.17.0-k3s.1
-	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.0-k3s.1
-	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.0-k3s.1
-	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.17.0-k3s.1
-	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.0-k3s.1
-	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.17.0-k3s.1
-	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.17.0-k3s.1
+	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.1-k3s.1
+	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.1-k3s.1
+	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.1-k3s.1
+	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.1-k3s.1
+	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.1-k3s.1
+	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.1-k3s.1
+	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.1-k3s.1
+	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.1-k3s.1
+	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.1-k3s.1
+	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.1-k3s.1
+	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.1-k3s.1
+	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.1-k3s.1
+	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.1-k3s.1
+	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.1-k3s.1
+	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.1-k3s.1
+	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.1-k3s.1
+	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.1-k3s.1
+	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.1-k3s.1
+	k8s.io/kubernetes => github.com/rancher/kubernetes v1.17.1-k3s.1
+	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.1-k3s.1
+	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.1-k3s.1
+	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.17.1-k3s.1
+	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.1-k3s.1
+	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.17.1-k3s.1
+	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.17.1-k3s.1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 )

go.sum
@@ -246,6 +246,48 @@ github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nI
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/galal-hussein/kubernetes v1.17.1-rc.1-k3s.1 h1:kx6i6K0LbMBiaItXTGi/WDYXlZ6a5PPqBtFMV/2HW0M=
+github.com/galal-hussein/kubernetes v1.17.1-rc.1-k3s.1/go.mod h1:NbNV+69yL3eKiKDJ+ZEjqOplN3BFXKBeunzkoOy8WLo=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/api v1.17.1-rc.1-k3s.1 h1:Bt+GnaEp9EmeXRADxqjRZXBPUwrlEgeMEuJBLkZFEaw=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/api v1.17.1-rc.1-k3s.1/go.mod h1:D7sf1F7GAHEK5DOn11dKP5J/QJvUShU2AopXe/AXghk=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.1-rc.1-k3s.1 h1:TelAPXWuf1ES3LTS7RsXMq9cmLilhum7IqCCD+5tXtw=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.1-rc.1-k3s.1/go.mod h1:HjqmpMjOO0RGrZVKCmHhQNgz6nzkzcEQ+TEIwzp14A0=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/apimachinery v1.17.1-rc.1-k3s.1 h1:LmIwa9f4slN0QEWPpVImYuPS7T4lWC4PlquVabuIhiY=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/apimachinery v1.17.1-rc.1-k3s.1/go.mod h1:1WXjse1VhndJdimFoqVzt1J+RlmhvGxU8CMVWuAuV4k=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/apiserver v1.17.1-rc.1-k3s.1 h1:Q3vlVD8CisRmF+LVp7mzrSEpqxsazBR3vm8+a9pqg8U=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/apiserver v1.17.1-rc.1-k3s.1/go.mod h1:3xmck1xz/FJcHnFUOjon3VC1HCe6TMMBIH8VSEuzcvM=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/cli-runtime v1.17.1-rc.1-k3s.1 h1:/7rPuxdT1iXmOX3mDdVyqbupbT267epZwof0UweFHto=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/cli-runtime v1.17.1-rc.1-k3s.1/go.mod h1:2j76SpnTiazjWbdidJY0tDtSLe6k0tY2v3vV9TLV6Cg=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/client-go v1.17.1-rc.1-k3s.1 h1:UMIYH+AYAhYS0z/a/Q2qtnTeb3Bi3dlGv7sy4mdktXU=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/client-go v1.17.1-rc.1-k3s.1/go.mod h1:2yNkwZhzqhHeXaY0SR4OH76H3qwXdjDsZCySPrW9LjI=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/cloud-provider v1.17.1-rc.1-k3s.1 h1:Vo6QHwYhpnvC29F0yUFVTrWG9xlGvdr2WkDSOPH+s+A=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/cloud-provider v1.17.1-rc.1-k3s.1/go.mod h1:ycFddVBapK98UCwexOuDsvQLD3tfiVdRlXm2jjNP14c=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.1-rc.1-k3s.1 h1:BluHnYTW+0dzdHWOFR+dmESLZb+r7nFN58+jjIbv4fo=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.1-rc.1-k3s.1/go.mod h1:Y9cWlhZVXTIx8hPYp4KajB9oNl1dBbba6LJndYpVnQo=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/code-generator v1.17.1-rc.1-k3s.1 h1:YSWAOv6LK81+yIQkK2QakRBVT5l0uP/mmlHu/wOormo=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/code-generator v1.17.1-rc.1-k3s.1/go.mod h1:2Gsj2VJhAB6dTcnR+841H51LUCyhufQPSzTQWRnevwY=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/component-base v1.17.1-rc.1-k3s.1 h1:No4E+PbK3L3WfJGtfZ5GZLTIZ8OGophg7zGKSHuXIrU=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/component-base v1.17.1-rc.1-k3s.1/go.mod h1:cTBtMr/eUcihQ4sFhgiDPlmlFpn1xBY37yQ+zNcT1IE=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/cri-api v1.17.1-rc.1-k3s.1 h1:oa9eBdj/pyvTXtbpRM+MkkfgO0sst8DZvqOZEvvXiic=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/cri-api v1.17.1-rc.1-k3s.1/go.mod h1:za6HqWgDNrKSvaq+Zg8GwyNeZifm/H9eMXr7yLJ+UdA=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.1-rc.1-k3s.1 h1:qydFjITBPDZp6mqRZKiLMrH6RVBPU9rdjZg3RpPcUtg=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.1-rc.1-k3s.1/go.mod h1:LO9jViQKnx/FqMjlXE9SmKjZ3I4PE/SLsaDWKTlmiRw=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.1-rc.1-k3s.1 h1:ba5MNgBrgflr5cS1ejlbEqWUdPF1NBP74OKMPQEVbqg=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.1-rc.1-k3s.1/go.mod h1:vh3GqAHrJU/N5vuEaKFmWYb78D7L6fiVBS4Id3OyDSc=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.1-rc.1-k3s.1 h1:xvWmHosyGoOQxRrjCUV4Sd2dyx0Di0suGIgNreIuIoc=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.1-rc.1-k3s.1/go.mod h1:mKHow8zS1GpDcLHlAgghj2BKPOZrpEIHA30lWyRF3gg=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kube-proxy v1.17.1-rc.1-k3s.1 h1:+oPV8CgGtWJMt2wvx7IV1DzPuWmzSHgKU16Yct6r/O0=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kube-proxy v1.17.1-rc.1-k3s.1/go.mod h1:6SB+e2ragxZ8CPWrgNZ7F9KkCFykx5zQjnLVP2SWQ00=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.1-rc.1-k3s.1 h1:v968Mzv6g8WyO2347YsVOnhJsEm7PV8HFyJvQylCfXs=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.1-rc.1-k3s.1/go.mod h1:8rHmgUeQCsvaVrSy79lQc3DKD5PQp9cJH0DLw2GSAdk=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kubectl v1.17.1-rc.1-k3s.1 h1:KWKaqnwATiO/2eVUzDHEF3/CPjRUchXoqXrNOUjJbO8=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kubectl v1.17.1-rc.1-k3s.1/go.mod h1:xacTENgh5ed/fa8fjur271/livetYIFI+P81WK+Ez9s=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kubelet v1.17.1-rc.1-k3s.1 h1:7yrLSVw9+JGk+0oETlcRpqaE/GHVSb8EUzMtptU33bs=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/kubelet v1.17.1-rc.1-k3s.1/go.mod h1:oHq6KV/WjiamPxKs2Np7JxcOEwHXfMybRBnLrMsTOHs=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.1-rc.1-k3s.1/go.mod h1:GTPm6pl8yIFEzk7bX03iY6kI5Bpt0Qd+BN3ajg/qeUI=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/metrics v1.17.1-rc.1-k3s.1 h1:pzkQVR7J5jicCinb51IfKiDjVN1n2MOx+tB7RnOOAA8=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/metrics v1.17.1-rc.1-k3s.1/go.mod h1:e70DTGI+y72YcU8iGDifilyP5uW6sPmBflDEjtiH/p0=
+github.com/galal-hussein/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.1-rc.1-k3s.1/go.mod h1:Ey49UYt1h2yQl0P61nfZEo/LZUu78Bj55oBL/VOejW4=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -721,47 +763,133 @@ github.com/rancher/kine v0.3.3 h1:FTOQN1qHQMzMfA8/nd9/rWfdR3Xk4PbXcOqw5P3dJDw=
 github.com/rancher/kine v0.3.3/go.mod h1:xEMl0tLCva9/9me7mXJ3m9Vo6yqHgC4OU3NiK4CPrGQ=
 github.com/rancher/kubernetes v1.17.0-k3s.1 h1:g1xvTHOHMJxwWtseblor0gighLRHpL7Bf9MwX8HR3W0=
 github.com/rancher/kubernetes v1.17.0-k3s.1/go.mod h1:NbNV+69yL3eKiKDJ+ZEjqOplN3BFXKBeunzkoOy8WLo=
+github.com/rancher/kubernetes v1.17.1-k3s.1 h1:dwL66NxvK0B5oaoHfxBHWf0Q7ejZt7RlyyT8RrwdfQI=
+github.com/rancher/kubernetes v1.17.1-k3s.1/go.mod h1:NbNV+69yL3eKiKDJ+ZEjqOplN3BFXKBeunzkoOy8WLo=
+github.com/rancher/kubernetes v1.17.1-rc.1-k3s.1 h1:H41X9pDo3fLPZypxFK6hJHIAwmsLh1SWos6NJwsevE0=
+github.com/rancher/kubernetes v1.17.1-rc.1-k3s.1/go.mod h1:NbNV+69yL3eKiKDJ+ZEjqOplN3BFXKBeunzkoOy8WLo=
 github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.0-k3s.1 h1:L2mS7D+Kv/0ZUg9uJZcPfKuDCYcKOTprTQsK35i2hFg=
 github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.0-k3s.1/go.mod h1:D7sf1F7GAHEK5DOn11dKP5J/QJvUShU2AopXe/AXghk=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.1-k3s.1 h1:pNUEJtc4jjX1c5j8fV4Bq8O6LwfyLferXGMHnxZkKJE=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.1-k3s.1/go.mod h1:D7sf1F7GAHEK5DOn11dKP5J/QJvUShU2AopXe/AXghk=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.1-rc.1-k3s.1 h1:o+HEI9Sq781FxTztUUGuc2uHTUkBj88ZxJrtkW4kJ0A=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.1-rc.1-k3s.1/go.mod h1:D7sf1F7GAHEK5DOn11dKP5J/QJvUShU2AopXe/AXghk=
 github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.0-k3s.1 h1:F4YzJg4x2LmghHs1BvI3wfy0oMUkrqAZIZ/qBWcQpQ0=
 github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.0-k3s.1/go.mod h1:HjqmpMjOO0RGrZVKCmHhQNgz6nzkzcEQ+TEIwzp14A0=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.1-k3s.1 h1:Gwsv3Hzi8W9T1+WPsWciXgeXNeQAamZXC5flbxh6BnA=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.1-k3s.1/go.mod h1:HjqmpMjOO0RGrZVKCmHhQNgz6nzkzcEQ+TEIwzp14A0=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.1-rc.1-k3s.1 h1:+TrCP6UgBWqYXKK1u9RuxBm/o4kYMF150K9ZkzuRBzc=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.1-rc.1-k3s.1/go.mod h1:HjqmpMjOO0RGrZVKCmHhQNgz6nzkzcEQ+TEIwzp14A0=
 github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.0-k3s.1 h1:LWYl4fuVNzMzTKE/hUpn7Pr0yn4nPsL27EDCno0FPOY=
 github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.0-k3s.1/go.mod h1:1WXjse1VhndJdimFoqVzt1J+RlmhvGxU8CMVWuAuV4k=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.1-k3s.1 h1:lDPu2Y9Kg/cU0sdwClcpnAJFr3NBw97w16IdBetihYA=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.1-k3s.1/go.mod h1:1WXjse1VhndJdimFoqVzt1J+RlmhvGxU8CMVWuAuV4k=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.1-rc.1-k3s.1 h1:zrN43ybSD9DF26kAATEb5L6CygUi89gJXdLd0V5TZ58=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.1-rc.1-k3s.1/go.mod h1:1WXjse1VhndJdimFoqVzt1J+RlmhvGxU8CMVWuAuV4k=
 github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.0-k3s.1 h1:eU0ZzJXuJvJmbyf2wm2mJKdUnKe4b+kxnS8p7Luu+9Q=
 github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.0-k3s.1/go.mod h1:3xmck1xz/FJcHnFUOjon3VC1HCe6TMMBIH8VSEuzcvM=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.1-k3s.1 h1:gA+8uaaOOPHPGIlps0fE1CF2LA9NSoahfyyQrzFgSHM=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.1-k3s.1/go.mod h1:3xmck1xz/FJcHnFUOjon3VC1HCe6TMMBIH8VSEuzcvM=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.1-rc.1-k3s.1 h1:rvRt8dzHKcwaytz3/PLIzee58ODeBC7E8fL/uYKjaGo=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.1-rc.1-k3s.1/go.mod h1:3xmck1xz/FJcHnFUOjon3VC1HCe6TMMBIH8VSEuzcvM=
 github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.0-k3s.1 h1:Vji40xPd8WuqRG9RWXkE4JHN8TXIBKIHUhfMGbNZ1fs=
 github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.0-k3s.1/go.mod h1:2j76SpnTiazjWbdidJY0tDtSLe6k0tY2v3vV9TLV6Cg=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.1-k3s.1 h1:s7rHOu3FJ6j0+/u64K4bLZqVpedaCvpyBwk8x5GnqnM=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.1-k3s.1/go.mod h1:2j76SpnTiazjWbdidJY0tDtSLe6k0tY2v3vV9TLV6Cg=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.1-rc.1-k3s.1 h1:PdBJnRB9GLmWd71I6O7FG2I+4SWj+G81RdQdY4yYSKQ=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.1-rc.1-k3s.1/go.mod h1:2j76SpnTiazjWbdidJY0tDtSLe6k0tY2v3vV9TLV6Cg=
 github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.0-k3s.1 h1:B840zigtwsgfULE59F0uCfd29LfLOcpkdBmbMfAswnI=
 github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.0-k3s.1/go.mod h1:2yNkwZhzqhHeXaY0SR4OH76H3qwXdjDsZCySPrW9LjI=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.1-k3s.1 h1:0iJfyOFqWLSncWvX/JlC7koOcpcPkieEBnkYgPQqgvU=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.1-k3s.1/go.mod h1:2yNkwZhzqhHeXaY0SR4OH76H3qwXdjDsZCySPrW9LjI=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.1-rc.1-k3s.1 h1:qhcOqs3JHnRyGhJ8RxY67PsbsAXD98mxV4jjPaJUenk=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.1-rc.1-k3s.1/go.mod h1:2yNkwZhzqhHeXaY0SR4OH76H3qwXdjDsZCySPrW9LjI=
 github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.0-k3s.1 h1:eo8/jIMvFsNESiSVDseuzbXMU2LyI2ISHsU6wlo+yv0=
 github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.0-k3s.1/go.mod h1:ycFddVBapK98UCwexOuDsvQLD3tfiVdRlXm2jjNP14c=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.1-k3s.1 h1:RCvWWjP8h1QqFyNDI6IPm+orZAhO7RaWpCIue0gA6AI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.1-k3s.1/go.mod h1:ycFddVBapK98UCwexOuDsvQLD3tfiVdRlXm2jjNP14c=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.1-rc.1-k3s.1 h1:EyQ47FosXHc/ydIi6rcNsE5VuGvp2Bu7+5VQSfNCk4g=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.1-rc.1-k3s.1/go.mod h1:ycFddVBapK98UCwexOuDsvQLD3tfiVdRlXm2jjNP14c=
 github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.0-k3s.1 h1:3x2lha4Zlwqo7hqLAS/3kYV2V+eLDQJrkGfMtWwPimk=
 github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.0-k3s.1/go.mod h1:Y9cWlhZVXTIx8hPYp4KajB9oNl1dBbba6LJndYpVnQo=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.1-k3s.1 h1:mcw1fpvh78vrP0KYNNPpXwX83uaUIhueRQksEAc01so=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.1-k3s.1/go.mod h1:Y9cWlhZVXTIx8hPYp4KajB9oNl1dBbba6LJndYpVnQo=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.1-rc.1-k3s.1 h1:cdobko8t672GNZuwlU6GlYMakaS6X+BXTjNo8OglWvw=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.1-rc.1-k3s.1/go.mod h1:Y9cWlhZVXTIx8hPYp4KajB9oNl1dBbba6LJndYpVnQo=
 github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.0-k3s.1 h1:iLxmbeOkPGRUmEEFxx4MOgRMLf5i5hHwIxh+5NKPOoQ=
 github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.0-k3s.1/go.mod h1:2Gsj2VJhAB6dTcnR+841H51LUCyhufQPSzTQWRnevwY=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.1-k3s.1 h1:G7lnD6pOQAxJ8B+u2T8mgByhmfKWJ9V+dl8jzB3u/Jg=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.1-k3s.1/go.mod h1:2Gsj2VJhAB6dTcnR+841H51LUCyhufQPSzTQWRnevwY=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.1-rc.1-k3s.1 h1:gCwfUvNNcPU8H7G4/hR7FgVryBpS6OZu2Fb0OjHHfmQ=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.1-rc.1-k3s.1/go.mod h1:2Gsj2VJhAB6dTcnR+841H51LUCyhufQPSzTQWRnevwY=
 github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.0-k3s.1 h1:JxdegmMPUm5Adr2aEF02dPXiDqRizjR61I0dniSt1VI=
 github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.0-k3s.1/go.mod h1:cTBtMr/eUcihQ4sFhgiDPlmlFpn1xBY37yQ+zNcT1IE=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.1-k3s.1 h1:+IBTuU5/Ax8/jxELMvyvucWusmaY9lcd1FFz52326jY=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.1-k3s.1/go.mod h1:cTBtMr/eUcihQ4sFhgiDPlmlFpn1xBY37yQ+zNcT1IE=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.1-rc.1-k3s.1 h1:4BBs5a9u1ZlLpM6jYtUIMjIDjqcS/WRPqWRZViAXkc8=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.1-rc.1-k3s.1/go.mod h1:cTBtMr/eUcihQ4sFhgiDPlmlFpn1xBY37yQ+zNcT1IE=
 github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.0-k3s.1 h1:Rgbv0fLtalfgJzqFhNIuZsy55DitUBuZlicik/fiYuQ=
 github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.0-k3s.1/go.mod h1:za6HqWgDNrKSvaq+Zg8GwyNeZifm/H9eMXr7yLJ+UdA=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.1-k3s.1 h1:HMOFF+Wc6FeXUUmRWWNEstZZXow95pXzoYgd86nmwBY=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.1-k3s.1/go.mod h1:za6HqWgDNrKSvaq+Zg8GwyNeZifm/H9eMXr7yLJ+UdA=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.1-rc.1-k3s.1 h1:qqM/nuKKbjemxNJULgxsw3Bi3KTFVLr7cMcWxS1NAlA=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.1-rc.1-k3s.1/go.mod h1:za6HqWgDNrKSvaq+Zg8GwyNeZifm/H9eMXr7yLJ+UdA=
 github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.0-k3s.1 h1:l8ula9u6lb7F4yQt4lq2PuxbrQavO8N7P6Agyx1Z6N4=
 github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.0-k3s.1/go.mod h1:LO9jViQKnx/FqMjlXE9SmKjZ3I4PE/SLsaDWKTlmiRw=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.1-k3s.1 h1:MQnmwgRO4ttrUokc0uLtGu7xvPDpXXl/ljwQOkK9rWI=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.1-k3s.1/go.mod h1:LO9jViQKnx/FqMjlXE9SmKjZ3I4PE/SLsaDWKTlmiRw=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.1-rc.1-k3s.1 h1:XnyIRri29W6xbZvCEmgBf2A5KP54iWl+5hpMPJPZ0d8=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.1-rc.1-k3s.1/go.mod h1:LO9jViQKnx/FqMjlXE9SmKjZ3I4PE/SLsaDWKTlmiRw=
 github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.0-k3s.1 h1:qLQHxIEhqdotbHHgCiAh693EzT8dV0QPC6s6dJTziP4=
 github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.0-k3s.1/go.mod h1:vh3GqAHrJU/N5vuEaKFmWYb78D7L6fiVBS4Id3OyDSc=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.1-k3s.1 h1:Wm1IRMpxmuql3Fvqp5JFh7G00sSDFlTvLXQIWqaEc+g=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.1-k3s.1/go.mod h1:vh3GqAHrJU/N5vuEaKFmWYb78D7L6fiVBS4Id3OyDSc=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.1-rc.1-k3s.1 h1:iyoWvqpkgEpneNOcoRcyeh81SP6s0vRVjPsutgP07WE=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.1-rc.1-k3s.1/go.mod h1:vh3GqAHrJU/N5vuEaKFmWYb78D7L6fiVBS4Id3OyDSc=
 github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.0-k3s.1 h1:glJGSKj3cx9ixwggXEW3qJVr353qrmwF/c/6RhkI0Pk=
 github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.0-k3s.1/go.mod h1:mKHow8zS1GpDcLHlAgghj2BKPOZrpEIHA30lWyRF3gg=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.1-k3s.1 h1:C+1ipk1B4dOKKEElvb5gUncNytDQCMvBjZ5hCCPbvNA=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.1-k3s.1/go.mod h1:mKHow8zS1GpDcLHlAgghj2BKPOZrpEIHA30lWyRF3gg=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.1-rc.1-k3s.1 h1:f2cgAyeXShaoj4OaA93serUTG5j3D1MKyUPFtspecTI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.1-rc.1-k3s.1/go.mod h1:mKHow8zS1GpDcLHlAgghj2BKPOZrpEIHA30lWyRF3gg=
 github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.0-k3s.1 h1:CIjWDBF6mL9vaOBm/I8Hu6kzs2TRacVMVRxW59rOeoo=
 github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.0-k3s.1/go.mod h1:6SB+e2ragxZ8CPWrgNZ7F9KkCFykx5zQjnLVP2SWQ00=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.1-k3s.1 h1:jNM7sL/o/mja7nuMdDjwP4sTobLiX2m/FQXQ9AKXMP8=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.1-k3s.1/go.mod h1:6SB+e2ragxZ8CPWrgNZ7F9KkCFykx5zQjnLVP2SWQ00=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.1-rc.1-k3s.1 h1:Kem520p/pZ6+dQ/gyK3mAQ5Xwe/+JUFARZukmHNiWbc=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.1-rc.1-k3s.1/go.mod h1:6SB+e2ragxZ8CPWrgNZ7F9KkCFykx5zQjnLVP2SWQ00=
 github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.0-k3s.1 h1:zX/XLpaqhGTSKk/68skv9FQUvyquQ7PgUzdQzlfyQPE=
 github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.0-k3s.1/go.mod h1:8rHmgUeQCsvaVrSy79lQc3DKD5PQp9cJH0DLw2GSAdk=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.1-k3s.1 h1:2scWXPGBn4Wlei5tJO+fTlx1OURH9Nw5RuuI8vrBomc=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.1-k3s.1/go.mod h1:8rHmgUeQCsvaVrSy79lQc3DKD5PQp9cJH0DLw2GSAdk=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.1-rc.1-k3s.1 h1:/QnL4YFy6QBOdo2TvabJhbHfxMxuSJ80BhPYrZBuV9o=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.1-rc.1-k3s.1/go.mod h1:8rHmgUeQCsvaVrSy79lQc3DKD5PQp9cJH0DLw2GSAdk=
 github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.0-k3s.1 h1:zkiKEDIgwBPnF4f8Tao4kcUncPJ8R2Qlc0tgoMXAbLI=
 github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.0-k3s.1/go.mod h1:xacTENgh5ed/fa8fjur271/livetYIFI+P81WK+Ez9s=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.1-k3s.1 h1:0Hrq84p6J7OcO65Lss1qRIXljfpkd6othPzr3c+uyGw=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.1-k3s.1/go.mod h1:xacTENgh5ed/fa8fjur271/livetYIFI+P81WK+Ez9s=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.1-rc.1-k3s.1 h1:YhwKdqQ2hszLjp37FVZ4Nx/564nmHQDfUk65R79olCQ=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.1-rc.1-k3s.1/go.mod h1:xacTENgh5ed/fa8fjur271/livetYIFI+P81WK+Ez9s=
 github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.0-k3s.1 h1:K4XBTCZ5vHruETTN9420mww7snWS0phQAAPnCQtEvEE=
 github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.0-k3s.1/go.mod h1:oHq6KV/WjiamPxKs2Np7JxcOEwHXfMybRBnLrMsTOHs=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.1-k3s.1 h1:N5a8p+ed1GNzvxrYQgpdeS1O0FCDR4xliIgI5EqefC0=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.1-k3s.1/go.mod h1:oHq6KV/WjiamPxKs2Np7JxcOEwHXfMybRBnLrMsTOHs=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.1-rc.1-k3s.1 h1:ou6pvO6PZWgFZPSIa5F733UUCqsCifYA3mNS3U7rvrs=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.1-rc.1-k3s.1/go.mod h1:oHq6KV/WjiamPxKs2Np7JxcOEwHXfMybRBnLrMsTOHs=
 github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.0-k3s.1 h1:9dtPgCf1FZyUUL8laEwhDrZiSOCEafe+DgvjrqIBJI4=
 github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.0-k3s.1/go.mod h1:GTPm6pl8yIFEzk7bX03iY6kI5Bpt0Qd+BN3ajg/qeUI=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.1-k3s.1 h1:cVm2Fegrt47IC2EaXjetK8ivr4EiJ9OoD9VQ9HGj2TM=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.1-k3s.1/go.mod h1:GTPm6pl8yIFEzk7bX03iY6kI5Bpt0Qd+BN3ajg/qeUI=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.1-rc.1-k3s.1 h1:XlHi6m7SDZOxlIFbYQGzzEcvUewV/ImROc8dk76ctzs=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.1-rc.1-k3s.1/go.mod h1:GTPm6pl8yIFEzk7bX03iY6kI5Bpt0Qd+BN3ajg/qeUI=
 github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.0-k3s.1 h1:PZ2rc1wavm4mWNi3FijA9mwpfAV8IEzuuSS1uQ4k0/4=
 github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.0-k3s.1/go.mod h1:e70DTGI+y72YcU8iGDifilyP5uW6sPmBflDEjtiH/p0=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.1-k3s.1 h1:VmfMUZMM+tExT0CiDHd9RlxgWqb3lWAmgYVo8GdUoIA=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.1-k3s.1/go.mod h1:e70DTGI+y72YcU8iGDifilyP5uW6sPmBflDEjtiH/p0=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.1-rc.1-k3s.1 h1:/qwJsJfRVPO3YYYFiMgpIwMnExPFTiml8Koh1s7tvyk=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.1-rc.1-k3s.1/go.mod h1:e70DTGI+y72YcU8iGDifilyP5uW6sPmBflDEjtiH/p0=
 github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.0-k3s.1/go.mod h1:Ey49UYt1h2yQl0P61nfZEo/LZUu78Bj55oBL/VOejW4=
+github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.1-k3s.1/go.mod h1:Ey49UYt1h2yQl0P61nfZEo/LZUu78Bj55oBL/VOejW4=
+github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.1-rc.1-k3s.1/go.mod h1:Ey49UYt1h2yQl0P61nfZEo/LZUu78Bj55oBL/VOejW4=
 github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
 github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
 github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=

@@ -463,6 +463,13 @@ func setDiscoveryDefaults(config *restclient.Config) error {
 	if config.Timeout == 0 {
 		config.Timeout = defaultTimeout
 	}
+	if config.Burst == 0 && config.QPS < 100 {
+		// discovery is expected to be bursty, increase the default burst
+		// to accommodate looking up resource info for many API groups.
+		// matches burst set by ConfigFlags#ToDiscoveryClient().
+		// see https://issue.k8s.io/86149
+		config.Burst = 100
+	}
 	codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()}
 	config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
 	if len(config.UserAgent) == 0 {

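Illustration (not part of the commit): the raised Burst default only applies when a caller leaves Burst and QPS at their zero values. A minimal sketch of a caller hitting that path — the kubeconfig location is a hypothetical example; the rest is public client-go API.

package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; any rest.Config works here.
	config, err := clientcmd.BuildConfigFromFlags("", "/etc/rancher/k3s/k3s.yaml")
	if err != nil {
		panic(err)
	}
	// Burst and QPS are left at zero, so setDiscoveryDefaults (patched above)
	// raises Burst to 100 before the discovery client is built.
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}
	groups, err := dc.ServerGroups()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(groups.Groups), "API groups discovered")
}
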
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "17"
-	gitVersion = "v1.17.0-k3s.1"
-	gitCommit = "dacbba10ba597c72898e0ee747f9a855ab093bc9"
+	gitVersion = "v1.17.1-k3s.1"
+	gitCommit = "c33b87d04b01f05ee53569e2807a7c85bb8a0ddc"
 	gitTreeState = "clean"
-	buildDate = "2019-12-11T20:03:48Z"
+	buildDate = "2020-01-15T17:03:01Z"
 )

@@ -28,7 +28,6 @@ go_test(
	race = "off",
	deps = [
		"//staging/src/k8s.io/api/core/v1:go_default_library",
		"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
		"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
		"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
		"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",

@@ -74,9 +74,6 @@ type Reflector struct {
 	// observed when doing a sync with the underlying store
 	// it is thread safe, but not synchronized with the underlying store
 	lastSyncResourceVersion string
-	// isLastSyncResourceVersionGone is true if the previous list or watch request with lastSyncResourceVersion
-	// failed with an HTTP 410 (Gone) status code.
-	isLastSyncResourceVersionGone bool
 	// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
 	lastSyncResourceVersionMutex sync.RWMutex
 	// WatchListPageSize is the requested chunk size of initial and resync watch lists.
@@ -188,7 +185,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 	klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
 	var resourceVersion string

-	options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
+	// Explicitly set "0" as resource version - it's fine for the List()
+	// to be served from cache and potentially be delayed relative to
+	// etcd contents. Reflector framework will catch up via Watch() eventually.
+	options := metav1.ListOptions{ResourceVersion: "0"}

 	if err := func() error {
 		initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name})
@@ -211,17 +211,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			if r.WatchListPageSize != 0 {
 				pager.PageSize = r.WatchListPageSize
 			}

+			// Pager falls back to full list if paginated list calls fail due to an "Expired" error.
 			list, err = pager.List(context.Background(), options)
-			if isExpiredError(err) {
-				r.setIsLastSyncResourceVersionExpired(true)
-				// Retry immediately if the resource version used to list is expired.
-				// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
-				// continuation pages, but the pager might not be enabled, or the full list might fail because the
-				// resource version it is listing at is expired, so we need to fallback to resourceVersion="" in all
-				// to recover and ensure the reflector makes forward progress.
-				list, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
-			}
 			close(listCh)
 		}()
 		select {
@@ -234,7 +225,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		if err != nil {
 			return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedTypeName, err)
 		}
-		r.setIsLastSyncResourceVersionExpired(false) // list was successful
 		initTrace.Step("Objects listed")
 		listMetaInterface, err := meta.ListAccessor(list)
 		if err != nil {
@@ -308,13 +298,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {

 		w, err := r.listerWatcher.Watch(options)
 		if err != nil {
-			switch {
-			case isExpiredError(err):
-				r.setIsLastSyncResourceVersionExpired(true)
-				klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
-			case err == io.EOF:
+			switch err {
+			case io.EOF:
 				// watch closed normally
-			case err == io.ErrUnexpectedEOF:
+			case io.ErrUnexpectedEOF:
 				klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err)
 			default:
 				utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err))
@@ -333,8 +320,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
 			if err != errorStopRequested {
 				switch {
-				case isExpiredError(err):
-					r.setIsLastSyncResourceVersionExpired(true)
+				case apierrs.IsResourceExpired(err):
 					klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
 				default:
 					klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
@@ -446,42 +432,3 @@ func (r *Reflector) setLastSyncResourceVersion(v string) {
 	defer r.lastSyncResourceVersionMutex.Unlock()
 	r.lastSyncResourceVersion = v
 }
-
-// relistResourceVersion determines the resource version the reflector should list or relist from.
-// Returns either the lastSyncResourceVersion so that this reflector will relist with a resource
-// versions no older than has already been observed in relist results or watch events, or, if the last relist resulted
-// in an HTTP 410 (Gone) status code, returns "" so that the relist will use the latest resource version available in
-// etcd via a quorum read.
-func (r *Reflector) relistResourceVersion() string {
-	r.lastSyncResourceVersionMutex.RLock()
-	defer r.lastSyncResourceVersionMutex.RUnlock()
-
-	if r.isLastSyncResourceVersionGone {
-		// Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache
-		// if the lastSyncResourceVersion is expired, we set ResourceVersion="" and list again to re-establish reflector
-		// to the latest available ResourceVersion, using a consistent read from etcd.
-		return ""
-	}
-	if r.lastSyncResourceVersion == "" {
-		// For performance reasons, initial list performed by reflector uses "0" as resource version to allow it to
-		// be served from the watch cache if it is enabled.
-		return "0"
-	}
-	return r.lastSyncResourceVersion
-}
-
-// setIsLastSyncResourceVersionExpired sets if the last list or watch request with lastSyncResourceVersion returned a
-// expired error: HTTP 410 (Gone) Status Code.
-func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) {
-	r.lastSyncResourceVersionMutex.Lock()
-	defer r.lastSyncResourceVersionMutex.Unlock()
-	r.isLastSyncResourceVersionGone = isExpired
-}
-
-func isExpiredError(err error) bool {
-	// In Kubernetes 1.17 and earlier, the api server returns both apierrs.StatusReasonExpired and
-	// apierrs.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
-	// and always returns apierrs.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone
-	// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
-	return apierrs.IsResourceExpired(err) || apierrs.IsGone(err)
-}

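Illustration (not part of the commit): taken together, these reflector hunks revert to the pre-410-handling behavior — ListAndWatch always issues its initial list at ResourceVersion "0" instead of consulting relistResourceVersion. A minimal sketch of wiring a Reflector that exercises this path, using only public client-go APIs (clientset construction is elided):

package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newPodReflector builds a Reflector over pods. With the patch above, its
// initial List always uses ResourceVersion "0" and so may be served from the
// apiserver watch cache rather than a quorum read from etcd.
func newPodReflector(cs kubernetes.Interface) *cache.Reflector {
	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	return cache.NewReflector(lw, &v1.Pod{}, store, 30*time.Second)
}

func main() {
	// Building a real clientset needs a kubeconfig; omitted in this sketch.
	_ = newPodReflector
}
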
@@ -31,6 +31,9 @@ func Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(in *[]Na
 		if err := Convert_v1_Cluster_To_api_Cluster(&curr.Cluster, newCluster, s); err != nil {
 			return err
 		}
+		if *out == nil {
+			*out = make(map[string]*api.Cluster)
+		}
 		if (*out)[curr.Name] == nil {
 			(*out)[curr.Name] = newCluster
 		} else {
@@ -65,6 +68,9 @@ func Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(in *[]
 		if err := Convert_v1_AuthInfo_To_api_AuthInfo(&curr.AuthInfo, newAuthInfo, s); err != nil {
 			return err
 		}
+		if *out == nil {
+			*out = make(map[string]*api.AuthInfo)
+		}
 		if (*out)[curr.Name] == nil {
 			(*out)[curr.Name] = newAuthInfo
 		} else {
@@ -99,6 +105,9 @@ func Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(in *[]Na
 		if err := Convert_v1_Context_To_api_Context(&curr.Context, newContext, s); err != nil {
 			return err
 		}
+		if *out == nil {
+			*out = make(map[string]*api.Context)
+		}
 		if (*out)[curr.Name] == nil {
 			(*out)[curr.Name] = newContext
 		} else {
@@ -133,6 +142,9 @@ func Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(in *[]Named
 		if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&curr.Extension, &newExtension, s); err != nil {
 			return err
 		}
+		if *out == nil {
+			*out = make(map[string]runtime.Object)
+		}
 		if (*out)[curr.Name] == nil {
 			(*out)[curr.Name] = newExtension
 		} else {

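Illustration (not part of the commit): the added "if *out == nil" guards matter because in Go, reading from a nil map is safe but assigning into one panics. A standalone example of the same lazy initialization:

package main

import "fmt"

func main() {
	var clusters map[string]*string // nil until explicitly made

	fmt.Println(clusters["missing"]) // reads on a nil map are safe: prints <nil>

	// clusters["a"] = ... here would panic: assignment to entry in nil map.
	if clusters == nil {
		clusters = make(map[string]*string) // the same guard the converters add
	}
	name := "cluster-a"
	clusters["a"] = &name
	fmt.Println(*clusters["a"])
}
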
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "17"
-	gitVersion = "v1.17.0-k3s.1"
-	gitCommit = "dacbba10ba597c72898e0ee747f9a855ab093bc9"
+	gitVersion = "v1.17.1-k3s.1"
+	gitCommit = "c33b87d04b01f05ee53569e2807a7c85bb8a0ddc"
 	gitTreeState = "clean"
-	buildDate = "2019-12-11T20:03:48Z"
+	buildDate = "2020-01-15T17:03:01Z"
 )

@@ -957,16 +957,6 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
 				// this error since all subsequent creations will fail.
 				return
 			}
-			if errors.IsTimeout(err) {
-				// Pod is created but its initialization has timed out.
-				// If the initialization is successful eventually, the
-				// controller will observe the creation via the informer.
-				// If the initialization fails, or if the pod keeps
-				// uninitialized for a long time, the informer will not
-				// receive any update, and the controller will create a new
-				// pod when the expectation expires.
-				return
-			}
 		}
 		if err != nil {
 			klog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)

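Illustration (not part of the commit): the same errors.IsTimeout branch is removed from the job and replicaset controllers in the two hunks below. Reduced to isolation, the deleted pattern looked roughly like this — a sketch using apimachinery's errors package, not the controllers' full expectation machinery:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// handleCreateErr mirrors the deleted branch: a timeout on pod creation was
// swallowed because the informer (or an expiring expectation) would reconcile it.
func handleCreateErr(err error) error {
	if apierrors.IsTimeout(err) {
		return nil
	}
	return err
}

func main() {
	fmt.Println(handleCreateErr(nil)) // non-timeout errors pass through unchanged
}
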
@@ -777,16 +777,6 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 				// this error since all subsequent creations will fail.
 				return
 			}
-			if errors.IsTimeout(err) {
-				// Pod is created but its initialization has timed out.
-				// If the initialization is successful eventually, the
-				// controller will observe the creation via the informer.
-				// If the initialization fails, or if the pod keeps
-				// uninitialized for a long time, the informer will not
-				// receive any update, and the controller will create a new
-				// pod when the expectation expires.
-				return
-			}
 		}
 		if err != nil {
 			defer utilruntime.HandleError(err)

@@ -575,16 +575,6 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
 				// anything because any creation will fail
 				return nil
 			}
-			if errors.IsTimeout(err) {
-				// Pod is created but its initialization has timed out.
-				// If the initialization is successful eventually, the
-				// controller will observe the creation via the informer.
-				// If the initialization fails, or if the pod keeps
-				// uninitialized for a long time, the informer will not
-				// receive any update, and the controller will create a new
-				// pod when the expectation expires.
-				return nil
-			}
 		}
 		return err
 	})

@@ -71,7 +71,6 @@ go_library(
 		"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
 		"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library",
 		"//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
-		"//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library",
 		"//vendor/k8s.io/utils/io:go_default_library",
 		"//vendor/k8s.io/utils/mount:go_default_library",
 		"//vendor/k8s.io/utils/path:go_default_library",
@@ -122,7 +121,6 @@ go_library(
 		"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
 		"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library",
 		"//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
-		"//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library",
 		"//vendor/k8s.io/utils/io:go_default_library",
 		"//vendor/k8s.io/utils/mount:go_default_library",
 		"//vendor/k8s.io/utils/path:go_default_library",

@@ -32,7 +32,6 @@ import (
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	"github.com/opencontainers/runc/libcontainer/configs"
-	libcontainersystem "github.com/opencontainers/runc/libcontainer/system"
 	"k8s.io/klog"
 	utilio "k8s.io/utils/io"
 	"k8s.io/utils/mount"
@@ -421,11 +420,7 @@ func setupKernelTunables(option KernelTunableBehavior) error {
 			klog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
 			err = sysctl.SetSysctl(flag, expectedValue)
 			if err != nil {
-				if libcontainersystem.RunningInUserNS() {
-					klog.Warningf("Updating kernel flag failed: %v: %v (running in UserNS)", flag, err)
-				} else {
-					errList = append(errList, err)
-				}
+				errList = append(errList, err)
 			}
 		}
 	}

@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 )

@@ -167,8 +168,11 @@ func (kl *Kubelet) GetPods() []*v1.Pod {
 	// a kubelet running without apiserver requires an additional
 	// update of the static pod status. See #57106
 	for _, p := range pods {
-		if status, ok := kl.statusManager.GetPodStatus(p.UID); ok {
-			p.Status = status
+		if kubelettypes.IsStaticPod(p) {
+			if status, ok := kl.statusManager.GetPodStatus(p.UID); ok {
+				klog.V(2).Infof("status for pod %v updated to %v", p.Name, status)
+				p.Status = status
+			}
 		}
 	}
 	return pods

@@ -24,7 +24,7 @@ import (

 	clientset "k8s.io/client-go/kubernetes"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -540,7 +540,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
 	}

 	oldStatus := pod.Status.DeepCopy()
-	newPod, patchBytes, err := statusutil.PatchPodStatus(m.kubeClient, pod.Namespace, pod.Name, *oldStatus, mergePodStatus(*oldStatus, status.status))
+	newPod, patchBytes, err := statusutil.PatchPodStatus(m.kubeClient, pod.Namespace, pod.Name, pod.UID, *oldStatus, mergePodStatus(*oldStatus, status.status))
 	klog.V(3).Infof("Patch status for pod %q with %q", format.Pod(pod), patchBytes)
 	if err != nil {
 		klog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)

@@ -13,8 +13,6 @@ go_library(
 	deps = [
 		"//pkg/apis/core:go_default_library",
 		"//pkg/registry/core/service/allocator:go_default_library",
-		"//vendor/k8s.io/utils/integer:go_default_library",
-		"//vendor/k8s.io/utils/net:go_default_library",
 	],
 )

@@ -19,14 +19,10 @@ package ipallocator
 import (
 	"errors"
 	"fmt"

+	"math/big"
+	"net"
+
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/registry/core/service/allocator"
-	"k8s.io/utils/integer"
-	utilnet "k8s.io/utils/net"
-	"math/big"
-	"net"
 )

 // Interface manages the allocation of IP addresses out of a range. Interface
@@ -84,8 +80,8 @@ type Range struct {

 // NewAllocatorCIDRRange creates a Range over a net.IPNet, calling allocatorFactory to construct the backing store.
 func NewAllocatorCIDRRange(cidr *net.IPNet, allocatorFactory allocator.AllocatorFactory) (*Range, error) {
-	max := integer.Int64Min(utilnet.RangeSize(cidr), 1<<16)
-	base := utilnet.BigForIP(cidr.IP)
+	max := RangeSize(cidr)
+	base := bigForIP(cidr.IP)
 	rangeSpec := cidr.String()

 	r := Range{
@@ -173,7 +169,7 @@ func (r *Range) AllocateNext() (net.IP, error) {
 	if !ok {
 		return nil, ErrFull
 	}
-	return utilnet.AddIPOffset(r.base, offset), nil
+	return addIPOffset(r.base, offset), nil
 }

 // Release releases the IP back to the pool. Releasing an
@@ -191,7 +187,7 @@ func (r *Range) Release(ip net.IP) error {
 // ForEach calls the provided function for each allocated IP.
 func (r *Range) ForEach(fn func(net.IP)) {
 	r.alloc.ForEach(func(offset int) {
-		ip, _ := utilnet.GetIndexedIP(r.net, offset+1) // +1 because Range doesn't store IP 0
+		ip, _ := GetIndexedIP(r.net, offset+1) // +1 because Range doesn't store IP 0
 		fn(ip)
 	})
 }
@@ -249,8 +245,49 @@ func (r *Range) contains(ip net.IP) (bool, int) {
 	return true, offset
 }

+// bigForIP creates a big.Int based on the provided net.IP
+func bigForIP(ip net.IP) *big.Int {
+	b := ip.To4()
+	if b == nil {
+		b = ip.To16()
+	}
+	return big.NewInt(0).SetBytes(b)
+}
+
+// addIPOffset adds the provided integer offset to a base big.Int representing a
+// net.IP
+func addIPOffset(base *big.Int, offset int) net.IP {
+	return net.IP(big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes())
+}
+
 // calculateIPOffset calculates the integer offset of ip from base such that
 // base + offset = ip. It requires ip >= base.
 func calculateIPOffset(base *big.Int, ip net.IP) int {
-	return int(big.NewInt(0).Sub(utilnet.BigForIP(ip), base).Int64())
+	return int(big.NewInt(0).Sub(bigForIP(ip), base).Int64())
 }
+
+// RangeSize returns the size of a range in valid addresses.
+func RangeSize(subnet *net.IPNet) int64 {
+	ones, bits := subnet.Mask.Size()
+	if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 {
+		return 0
+	}
+	// For IPv6, the max size will be limited to 65536
+	// This is due to the allocator keeping track of all the
+	// allocated IP's in a bitmap. This will keep the size of
+	// the bitmap to 64k.
+	if bits == 128 && (bits-ones) >= 16 {
+		return int64(1) << uint(16)
+	} else {
+		return int64(1) << uint(bits-ones)
+	}
+}
+
+// GetIndexedIP returns a net.IP that is subnet.IP + index in the contiguous IP space.
+func GetIndexedIP(subnet *net.IPNet, index int) (net.IP, error) {
+	ip := addIPOffset(bigForIP(subnet.IP), index)
+	if !subnet.Contains(ip) {
+		return nil, fmt.Errorf("can't generate IP with index %d from subnet. subnet too small. subnet: %q", index, subnet)
+	}
+	return ip, nil
+}

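Illustration (not part of the commit): a quick usage sketch of the two exported helpers reintroduced above, assuming the package is imported from its in-tree path; the /24 values are just an example.

package main

import (
	"fmt"
	"net"

	"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
)

func main() {
	_, subnet, _ := net.ParseCIDR("10.0.0.0/24")

	// 256 addresses in a /24; for IPv6, RangeSize caps the result at 65536
	// to bound the allocator's bitmap.
	fmt.Println(ipallocator.RangeSize(subnet))

	// subnet.IP + 1 => 10.0.0.1; an index outside the subnet returns an error.
	ip, err := ipallocator.GetIndexedIP(subnet, 1)
	fmt.Println(ip, err)
}
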
@@ -7,6 +7,7 @@ go_library(
 	visibility = ["//visibility:public"],
 	deps = [
 		"//staging/src/k8s.io/api/core/v1:go_default_library",
+		"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
@@ -20,6 +21,7 @@ go_test(
 	deps = [
 		"//staging/src/k8s.io/api/core/v1:go_default_library",
+		"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
 	],
 )

@@ -20,15 +20,16 @@ import (
 	"encoding/json"
 	"fmt"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	clientset "k8s.io/client-go/kubernetes"
 )

 // PatchPodStatus patches pod status.
-func PatchPodStatus(c clientset.Interface, namespace, name string, oldPodStatus, newPodStatus v1.PodStatus) (*v1.Pod, []byte, error) {
-	patchBytes, err := preparePatchBytesForPodStatus(namespace, name, oldPodStatus, newPodStatus)
+func PatchPodStatus(c clientset.Interface, namespace, name string, uid types.UID, oldPodStatus, newPodStatus v1.PodStatus) (*v1.Pod, []byte, error) {
+	patchBytes, err := preparePatchBytesForPodStatus(namespace, name, uid, oldPodStatus, newPodStatus)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -40,7 +41,7 @@ func PatchPodStatus(c clientset.Interface, namespace, name string, oldPodStatus,
 	return updatedPod, patchBytes, nil
 }

-func preparePatchBytesForPodStatus(namespace, name string, oldPodStatus, newPodStatus v1.PodStatus) ([]byte, error) {
+func preparePatchBytesForPodStatus(namespace, name string, uid types.UID, oldPodStatus, newPodStatus v1.PodStatus) ([]byte, error) {
 	oldData, err := json.Marshal(v1.Pod{
 		Status: oldPodStatus,
 	})
@@ -49,7 +50,8 @@ func preparePatchBytesForPodStatus(namespace, name string, oldPodStatus, newPodS
 	}

 	newData, err := json.Marshal(v1.Pod{
-		Status: newPodStatus,
+		ObjectMeta: metav1.ObjectMeta{UID: uid}, // only put the uid in the new object to ensure it appears in the patch as a precondition
+		Status:     newPodStatus,
 	})
 	if err != nil {
 		return nil, fmt.Errorf("failed to Marshal newData for pod %q/%q: %v", namespace, name, err)

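Illustration (not part of the commit): marshalling the UID into only the new object makes it surface in the generated two-way merge patch, so the apiserver rejects the patch if the pod was deleted and recreated under a different UID. A sketch of the patch bytes this produces, assuming the same strategicpatch package the file already imports:

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	uid := types.UID("1234-abcd")
	oldData, _ := json.Marshal(v1.Pod{Status: v1.PodStatus{Phase: v1.PodPending}})
	newData, _ := json.Marshal(v1.Pod{
		ObjectMeta: metav1.ObjectMeta{UID: uid}, // appears in the patch as a precondition
		Status:     v1.PodStatus{Phase: v1.PodRunning},
	})
	patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Pod{})
	if err != nil {
		panic(err)
	}
	// Roughly: {"metadata":{"uid":"1234-abcd"},"status":{"phase":"Running"}}
	fmt.Println(string(patch))
}
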
@@ -29,17 +29,20 @@ import (
 	utilexec "k8s.io/utils/exec"
 )

-// exclude those used by azure as resource and OS root in /dev/disk/azure
+// exclude those used by azure as resource and OS root in /dev/disk/azure, /dev/disk/azure/scsi0
+// "/dev/disk/azure/scsi0" dir is populated in Standard_DC4s/DC2s on Ubuntu 18.04
 func listAzureDiskPath(io ioHandler) []string {
-	azureDiskPath := "/dev/disk/azure/"
 	var azureDiskList []string
-	if dirs, err := io.ReadDir(azureDiskPath); err == nil {
-		for _, f := range dirs {
-			name := f.Name()
-			diskPath := azureDiskPath + name
-			if link, linkErr := io.Readlink(diskPath); linkErr == nil {
-				sd := link[(libstrings.LastIndex(link, "/") + 1):]
-				azureDiskList = append(azureDiskList, sd)
+	azureResourcePaths := []string{"/dev/disk/azure/", "/dev/disk/azure/scsi0/"}
+	for _, azureDiskPath := range azureResourcePaths {
+		if dirs, err := io.ReadDir(azureDiskPath); err == nil {
+			for _, f := range dirs {
+				name := f.Name()
+				diskPath := filepath.Join(azureDiskPath, name)
+				if link, linkErr := io.Readlink(diskPath); linkErr == nil {
+					sd := link[(libstrings.LastIndex(link, "/") + 1):]
+					azureDiskList = append(azureDiskList, sd)
+				}
 			}
 		}
 	}

@@ -240,7 +240,8 @@ func doCleanSubPaths(mounter mount.Interface, podDir string, volumeName string)
 			return err
 		}

-		if info.IsDir() {
+		// We need to check that info is not nil. This may happen when the incoming err is not nil due to stale mounts or permission errors.
+		if info != nil && info.IsDir() {
 			// skip subdirs of the volume: it only matters the first level to unmount, otherwise it would try to unmount subdir of the volume
 			return filepath.SkipDir
 		}

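Illustration (not part of the commit): filepath.Walk hands its callback a nil FileInfo whenever stat fails and reports the failure via err, so any callback that tolerates some errors must guard the dereference. A standalone example (the walked path is only illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	err := filepath.Walk("/var/lib/kubelet/pods", func(path string, info os.FileInfo, err error) error {
		// On stale mounts or permission errors, err != nil and info == nil;
		// calling info.IsDir() unguarded would panic.
		if info != nil && info.IsDir() {
			fmt.Println("dir:", path)
		}
		return err
	})
	if err != nil {
		fmt.Println("walk ended with:", err)
	}
}
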
@@ -90,7 +90,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (
 			env.ServiceManagementEndpoint)
 	}

-	oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, tenantID)
+	oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, tenantID, nil)
 	if err != nil {
 		return nil, fmt.Errorf("creating the OAuth config: %v", err)
 	}

@@ -46,6 +46,15 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri

 	if isManagedDisk {
 		managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
+		if diskEncryptionSetID == "" {
+			if vm.StorageProfile.OsDisk != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil {
+				// set diskEncryptionSet as value of os disk by default
+				diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID
+			}
+		}
 		if diskEncryptionSetID != "" {
 			managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &diskEncryptionSetID}
 		}
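Note: the added block inherits the OS disk's encryption set when the caller did not request one (the identical change appears in the scale-set path below). A standalone sketch of that defaulting rule with stand-in types (the real ones come from the Azure compute SDK, where every link in the chain is optional):

    package main

    import "fmt"

    type DiskEncryptionSetParameters struct{ ID *string }
    type ManagedDiskParameters struct{ DiskEncryptionSet *DiskEncryptionSetParameters }
    type OSDisk struct{ ManagedDisk *ManagedDiskParameters }
    type StorageProfile struct{ OsDisk *OSDisk }

    func defaultEncryptionSetID(sp StorageProfile, requested string) string {
    	if requested != "" {
    		return requested // an explicit request always wins
    	}
    	// Guard every optional link before dereferencing the ID.
    	if sp.OsDisk != nil && sp.OsDisk.ManagedDisk != nil &&
    		sp.OsDisk.ManagedDisk.DiskEncryptionSet != nil &&
    		sp.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil {
    		return *sp.OsDisk.ManagedDisk.DiskEncryptionSet.ID
    	}
    	return ""
    }

    func main() {
    	id := "des-from-os-disk" // hypothetical encryption set ID
    	sp := StorageProfile{OsDisk: &OSDisk{ManagedDisk: &ManagedDiskParameters{
    		DiskEncryptionSet: &DiskEncryptionSetParameters{ID: &id},
    	}}}
    	fmt.Println(defaultEncryptionSetID(sp, ""))       // inherits des-from-os-disk
    	fmt.Println(defaultEncryptionSetID(sp, "custom")) // keeps custom
    }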
@@ -48,6 +48,15 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
 	}
 	if isManagedDisk {
 		managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
+		if diskEncryptionSetID == "" {
+			if vm.StorageProfile.OsDisk != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil &&
+				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil {
+				// set diskEncryptionSet as value of os disk by default
+				diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID
+			}
+		}
 		if diskEncryptionSetID != "" {
 			managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &diskEncryptionSetID}
 		}
@@ -1641,7 +1641,7 @@ func equalLoadBalancingRulePropertiesFormat(s *network.LoadBalancingRuleProperti
 		reflect.DeepEqual(s.EnableTCPReset, t.EnableTCPReset) &&
 		reflect.DeepEqual(s.DisableOutboundSnat, t.DisableOutboundSnat)

-	if wantLB {
+	if wantLB && s.IdleTimeoutInMinutes != nil && t.IdleTimeoutInMinutes != nil {
 		return properties && reflect.DeepEqual(s.IdleTimeoutInMinutes, t.IdleTimeoutInMinutes)
 	}
 	return properties
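Note: the extra nil checks exist because reflect.DeepEqual distinguishes an unset *int32 from one Azure has defaulted, so comparing a desired rule with no idle timeout against a provisioned rule always reported a difference. Minimal demonstration:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    func main() {
    	var desired *int32      // idleTimeoutInMinutes left unset by the user
    	provisioned := int32(4) // the platform fills in a default on the real rule
    	fmt.Println(reflect.DeepEqual(desired, &provisioned)) // false: a perpetual "update needed"
    }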
@@ -464,7 +464,7 @@ func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, e
 		failureDomain = as.makeZone(to.String(vm.Location), zoneID)
 	} else {
 		// Availability zone is not used for the node, falling back to fault domain.
-		failureDomain = strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
+		failureDomain = strconv.Itoa(int(to.Int32(vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain)))
 	}

 	zone := cloudprovider.Zone{
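Note: to.Int32 from go-autorest dereferences safely, returning 0 for nil, so a VM whose instance view omits PlatformFaultDomain no longer panics the zone lookup:

    package main

    import (
    	"fmt"

    	"github.com/Azure/go-autorest/autorest/to"
    )

    func main() {
    	var faultDomain *int32                // missing from the instance view
    	fmt.Println(to.Int32(faultDomain))    // 0 instead of a nil-pointer panic
    	fmt.Println(to.Int32(to.Int32Ptr(2))) // 2 when the field is present
    }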
@@ -60,6 +60,7 @@ type scaleSet struct {
 	// (e.g. master nodes) may not belong to any scale sets.
 	availabilitySet VMSet

+	vmssCache                 *timedCache
 	vmssVMCache               *timedCache
 	availabilitySetNodesCache *timedCache
 }

@@ -77,6 +78,11 @@ func newScaleSet(az *Cloud) (VMSet, error) {
 		return nil, err
 	}

+	ss.vmssCache, err = ss.newVMSSCache()
+	if err != nil {
+		return nil, err
+	}
+
 	ss.vmssVMCache, err = ss.newVMSSVirtualMachinesCache()
 	if err != nil {
 		return nil, err
@@ -85,6 +91,43 @@ func newScaleSet(az *Cloud) (VMSet, error) {
 	return ss, nil
 }

+func (ss *scaleSet) getVMSS(vmssName string, crt cacheReadType) (*compute.VirtualMachineScaleSet, error) {
+	getter := func(vmssName string) (*compute.VirtualMachineScaleSet, error) {
+		cached, err := ss.vmssCache.Get(vmssKey, crt)
+		if err != nil {
+			return nil, err
+		}
+
+		vmsses := cached.(*sync.Map)
+		if vmss, ok := vmsses.Load(vmssName); ok {
+			result := vmss.(*vmssEntry)
+			return result.vmss, nil
+		}
+
+		return nil, nil
+	}
+
+	vmss, err := getter(vmssName)
+	if err != nil {
+		return nil, err
+	}
+	if vmss != nil {
+		return vmss, nil
+	}
+
+	klog.V(3).Infof("Couldn't find VMSS with name %s, refreshing the cache", vmssName)
+	ss.vmssCache.Delete(vmssKey)
+	vmss, err = getter(vmssName)
+	if err != nil {
+		return nil, err
+	}
+
+	if vmss == nil {
+		return nil, cloudprovider.InstanceNotFound
+	}
+	return vmss, nil
+}
+
 // getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
 // It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
 func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) {
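Note: getVMSS reads through the shared cache and, on a miss, invalidates the cached map exactly once and retries before concluding the scale set does not exist, so a freshly created VMSS is found without waiting out the TTL. A standalone sketch of that read/refresh pattern with a stand-in store (the real code goes through timedCache under vmssKey):

    package main

    import (
    	"errors"
    	"fmt"
    	"sync"
    )

    var errNotFound = errors.New("instance not found")

    type store struct {
    	mu     sync.Mutex
    	cached map[string]string // nil means "needs (re)population"
    }

    // fill stands in for the expensive List call that rebuilds the cache.
    func (s *store) fill() { s.cached = map[string]string{"vmss-a": "entry-a"} }

    func (s *store) get(name string) (string, error) {
    	s.mu.Lock()
    	defer s.mu.Unlock()
    	lookup := func() (string, bool) {
    		if s.cached == nil {
    			s.fill()
    		}
    		v, ok := s.cached[name]
    		return v, ok
    	}
    	if v, ok := lookup(); ok {
    		return v, nil
    	}
    	s.cached = nil // miss: the object may be newer than the cache, refresh once
    	if v, ok := lookup(); ok {
    		return v, nil
    	}
    	return "", errNotFound
    }

    func main() {
    	s := &store{}
    	fmt.Println(s.get("vmss-a")) // entry-a <nil>
    	fmt.Println(s.get("vmss-b")) // "" instance not found
    }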
@@ -903,7 +946,7 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
 	}

 	for vmssName := range vmssNamesMap {
-		vmss, err := ss.GetScaleSetWithRetry(service, ss.ResourceGroup, vmssName)
+		vmss, err := ss.getVMSS(vmssName, cacheReadTypeDefault)
 		if err != nil {
 			return err
 		}
@@ -1208,7 +1251,7 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
 	}

 	for vmssName := range vmssNamesMap {
-		vmss, err := ss.GetScaleSetWithRetry(service, ss.ResourceGroup, vmssName)
+		vmss, err := ss.getVMSS(vmssName, cacheReadTypeDefault)

 		// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
 		// Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
@@ -19,7 +19,7 @@ limitations under the License.
 package azure

 import (
-	"fmt"
+	"context"
 	"strings"
 	"sync"
 	"time"
@@ -35,10 +35,12 @@ var (
 	vmssNameSeparator  = "_"
 	vmssCacheSeparator = "#"

+	vmssKey                 = "k8svmssKey"
 	vmssVirtualMachinesKey  = "k8svmssVirtualMachinesKey"
 	availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"

 	availabilitySetNodesCacheTTL = 15 * time.Minute
+	vmssTTL                      = 10 * time.Minute
 	vmssVirtualMachinesTTL       = 10 * time.Minute
 )
@@ -50,8 +52,43 @@ type vmssVirtualMachinesEntry struct {
 	lastUpdate time.Time
 }

-func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
-	return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID)
+type vmssEntry struct {
+	vmss       *compute.VirtualMachineScaleSet
+	lastUpdate time.Time
+}
+
+func (ss *scaleSet) newVMSSCache() (*timedCache, error) {
+	getter := func(key string) (interface{}, error) {
+		localCache := &sync.Map{} // [vmssName]*vmssEntry
+
+		allResourceGroups, err := ss.GetResourceGroups()
+		if err != nil {
+			return nil, err
+		}
+
+		for _, resourceGroup := range allResourceGroups.List() {
+			allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(context.Background(), resourceGroup)
+			if err != nil {
+				klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
+				return nil, err
+			}
+
+			for _, scaleSet := range allScaleSets {
+				if scaleSet.Name == nil || *scaleSet.Name == "" {
+					klog.Warning("failed to get the name of VMSS")
+					continue
+				}
+				localCache.Store(*scaleSet.Name, &vmssEntry{
+					vmss:       &scaleSet,
+					lastUpdate: time.Now().UTC(),
+				})
+			}
+		}
+
+		return localCache, nil
+	}
+
+	return newTimedcache(vmssTTL, getter)
 }

 func extractVmssVMName(name string) (string, string, error) {
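Note: newVMSSCache lists every scale set in every resource group once per TTL and serves reads from the resulting map. A minimal TTL-cache sketch in the same spirit (timedCache/newTimedcache are internal to this package; the type below is a stand-in):

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    type ttlCache struct {
    	mu      sync.Mutex
    	ttl     time.Duration
    	getter  func() (interface{}, error) // rebuilds the full value on expiry
    	value   interface{}
    	fetched time.Time
    }

    func (c *ttlCache) Get() (interface{}, error) {
    	c.mu.Lock()
    	defer c.mu.Unlock()
    	if c.value == nil || time.Since(c.fetched) > c.ttl {
    		v, err := c.getter()
    		if err != nil {
    			return nil, err
    		}
    		c.value, c.fetched = v, time.Now()
    	}
    	return c.value, nil
    }

    func main() {
    	calls := 0
    	c := &ttlCache{ttl: 10 * time.Minute, getter: func() (interface{}, error) {
    		calls++ // stands in for listing every VMSS in every resource group
    		return map[string]string{"vmss-a": "entry"}, nil
    	}}
    	c.Get()
    	c.Get()
    	fmt.Println(calls) // 1: the second read is served from the cache
    }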
@@ -23,6 +23,7 @@ import (

 	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
 	compute "google.golang.org/api/compute/v1"
+	option "google.golang.org/api/option"
 	"k8s.io/client-go/tools/cache"
 )
@@ -62,7 +63,10 @@ func fakeClusterID(clusterID string) ClusterID {

 // NewFakeGCECloud constructs a fake GCE Cloud from the cluster values.
 func NewFakeGCECloud(vals TestClusterValues) *Cloud {
-	service, _ := compute.NewService(context.Background())
+	service, err := compute.NewService(context.Background(), option.WithoutAuthentication())
+	if err != nil {
+		panic(err)
+	}
 	gce := &Cloud{
 		region:  vals.Region,
 		service: service,
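Note: without option.WithoutAuthentication the constructor looks up Application Default Credentials and fails in credential-less test environments, which is also why the error is now surfaced instead of discarded. A sketch of the test-friendly construction (assuming the google.golang.org/api packages):

    package main

    import (
    	"context"

    	compute "google.golang.org/api/compute/v1"
    	"google.golang.org/api/option"
    )

    func main() {
    	// Skips the credential lookup entirely; suitable only for fakes/tests.
    	svc, err := compute.NewService(context.Background(), option.WithoutAuthentication())
    	if err != nil {
    		panic(err)
    	}
    	_ = svc
    }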
@@ -572,7 +572,7 @@ func getLocalIP() ([]v1.NodeAddress, error) {
 	} else {
 		for _, addr := range localAddrs {
 			if ipnet, ok := addr.(*net.IPNet); ok {
-				if ipnet.IP.To4() != nil {
+				if !ipnet.IP.IsLinkLocalUnicast() {
 					// Filter external IP by MAC address OUIs from vCenter and from ESX
 					vmMACAddr := strings.ToLower(i.HardwareAddr.String())
 					// Making sure that the MAC address is long enough
@@ -683,7 +683,7 @@ func (vs *VSphere) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName
 	for _, v := range vmMoList[0].Guest.Net {
 		if vs.cfg.Network.PublicNetwork == v.Network {
 			for _, ip := range v.IpAddress {
-				if net.ParseIP(ip).To4() != nil {
+				if !net.ParseIP(ip).IsLinkLocalUnicast() {
 					nodehelpers.AddToNodeAddresses(&addrs,
 						v1.NodeAddress{
 							Type: v1.NodeExternalIP,
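Note: both vSphere changes swap the IPv4-only test for a link-local filter, so self-assigned 169.254.0.0/16 (and fe80::/10) addresses are no longer published as node addresses. Quick demonstration of the predicate:

    package main

    import (
    	"fmt"
    	"net"
    )

    func main() {
    	for _, s := range []string{"169.254.169.254", "10.0.0.4", "fe80::1"} {
    		fmt.Printf("%-16s link-local=%v\n", s, net.ParseIP(s).IsLinkLocalUnicast())
    	}
    }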
@@ -1104,7 +1104,7 @@ gopkg.in/square/go-jose.v2/jwt
 gopkg.in/warnings.v0
 # gopkg.in/yaml.v2 v2.2.4
 gopkg.in/yaml.v2
-# k8s.io/api v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.0-k3s.1
+# k8s.io/api v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.1-k3s.1
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
 k8s.io/api/admissionregistration/v1

@@ -1148,7 +1148,7 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.0-k3s.1
+# k8s.io/apiextensions-apiserver v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.1-k3s.1
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install

@@ -1196,7 +1196,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
 k8s.io/apiextensions-apiserver/pkg/registry/customresource
 k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
 k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
-# k8s.io/apimachinery v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.0-k3s.1
+# k8s.io/apimachinery v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.1-k3s.1
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta

@@ -1258,7 +1258,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.0-k3s.1
+# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.1-k3s.1
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
 k8s.io/apiserver/pkg/admission/initializer

@@ -1375,7 +1375,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
 k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.0-k3s.1
+# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.1-k3s.1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/kustomize
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps

@@ -1388,7 +1388,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.0-k3s.1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.1-k3s.1
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/cached
 k8s.io/client-go/discovery/cached/disk

@@ -1581,20 +1581,20 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.0-k3s.1
+# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.1-k3s.1
 k8s.io/cloud-provider
 k8s.io/cloud-provider/node/helpers
 k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.0-k3s.1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.1-k3s.1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.0-k3s.1
+# k8s.io/code-generator v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.1-k3s.1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake

@@ -1609,7 +1609,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.0-k3s.1
+# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.1-k3s.1
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
 k8s.io/component-base/codec

@@ -1626,10 +1626,10 @@ k8s.io/component-base/metrics/prometheus/version
 k8s.io/component-base/metrics/prometheus/workqueue
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.0-k3s.1
+# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.1-k3s.1
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.0-k3s.1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.1-k3s.1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e

@@ -1644,7 +1644,7 @@ k8s.io/gengo/types
 k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.0-k3s.1
+# k8s.io/kube-aggregator v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.1-k3s.1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1

@@ -1672,7 +1672,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.0-k3s.1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.1-k3s.1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a
 k8s.io/kube-openapi/pkg/aggregator

@@ -1683,12 +1683,12 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.0-k3s.1
+# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.1-k3s.1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.0-k3s.1
+# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.1-k3s.1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1alpha1
-# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.0-k3s.1
+# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.1-k3s.1
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
 k8s.io/kubectl/pkg/cmd/annotate

@@ -1764,11 +1764,11 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.0-k3s.1
+# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.1-k3s.1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
 k8s.io/kubelet/pkg/apis/pluginregistration/v1
-# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.17.0-k3s.1
+# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.17.1-k3s.1
 k8s.io/kubernetes/cmd/cloud-controller-manager/app
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme

@@ -2506,7 +2506,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
 k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.0-k3s.1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.1-k3s.1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth

@@ -2515,7 +2515,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.0-k3s.1
+# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.1-k3s.1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2