mirror of https://github.com/k3s-io/k3s

Update to v1.16.13-k3s1

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
pull/2030/head
parent 81f49ac358
commit d833224d96

go.mod (50 lines changed)
@@ -34,31 +34,31 @@ replace (
github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.11-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.11-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.11-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.11-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.11-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.11-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.11-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.11-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.11-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.11-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.11-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.11-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.11-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.11-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.11-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.11-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.11-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.11-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.16.11-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.11-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.11-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.16.11-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.11-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.16.11-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.16.11-k3s1
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.13-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.13-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.13-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.13-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.13-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.13-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.13-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.13-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.13-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.13-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.13-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.13-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.13-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.13-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.13-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.13-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.13-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.13-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.16.13-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.13-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.13-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.16.13-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.13-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.16.13-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.16.13-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
go.sum (86 lines changed)
@@ -601,49 +601,49 @@ github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd h1:KPnQ
github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd/go.mod h1:QYmg8cqWPPfIbpEuhtJbEdWwA6PEKSY016Z6EdfL9+8=
github.com/rancher/kine v0.2.5 h1:UE0HrxloO95zPEXYN/n8Rwejx276fc7s8I5JbJcLdmY=
github.com/rancher/kine v0.2.5/go.mod h1:SdBUuE7e3XyrJvdBxCl9TMMapF+wyZnMZSP/H59OqNE=
github.com/rancher/kubernetes v1.16.11-k3s1 h1:6Oyo0d0MHwZnyRT2Con7AovmX1s5gDR/5O2dHhAIyJA=
github.com/rancher/kubernetes v1.16.11-k3s1/go.mod h1:I1/NbQo7HFnBycd5xkM76qBKCQx90j4t4PMD6MbJ5K0=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.11-k3s1 h1:IxVATqeYh6A12qwFM/laso6h/GG/65MUChNeXxv5kos=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.11-k3s1/go.mod h1:hF711SYP9H3Bqm/pquHb7I9hmYCbyZmz7AZRaXu1rqE=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.11-k3s1 h1:mSfzGv3j4jXAvP9nGy88ymC1uDZy4uog2GnVWInPqSE=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.11-k3s1/go.mod h1:VBfb4GYcC+wWE9En7Qiiy2GtEYHyu7+OF4va++HcNEA=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.11-k3s1 h1:vR957y425Bt4yilH86b+DhmadBdPDq3D/NBnSReBePU=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.11-k3s1/go.mod h1:DHoqDukC6AO78SjdpZDs4uxWsr8ELOtSgnpfcmuv3cw=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.11-k3s1 h1:ysANGm5zExMP+fjIlGZElWOG2kC8ei9+CX94pRLlkI4=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.11-k3s1/go.mod h1:1HjCjgDtd6sh87IYWH4Mpt6Zucx/CPcRFdw2zxc0Csw=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.11-k3s1 h1:ZgG6DJpc1GrcQN6YVRoIE1dMbvhH78eDYBmJa4kD82w=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.11-k3s1/go.mod h1:VpFACNqiYpPtmofpGz77fAfDTDyOnbvU0rJ2CpM40A4=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.11-k3s1 h1:ugFvaQYTZHx2ZjIxCah7vRNFl5sIt/VanyNoSS3p7+0=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.11-k3s1/go.mod h1:o1JsPLiZ5bL+KsLEe/wHo65emfcWBAsa0hSpWuMRX80=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.11-k3s1 h1:QEGbAfTst2hrIKQFBy5F0LU85WCOD3/2YwS2lLHYgxI=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.11-k3s1/go.mod h1:mkOG2NRE3z5O+q3d1mmg3DiltNAqprjQWCJEqS941Sk=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.11-k3s1 h1:LRk46IXSgDZ3dk/huoNb8TDOTk8gT2NO+TFwonEC9FU=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.11-k3s1/go.mod h1:VRJM3GFPwH5SheYrgtmNLDThbGZV36jQbE4KLzoI9bg=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.11-k3s1 h1:7wzxNKfEdewexZNkmwg6qF4kLepINJvWJnUvSA+6xUk=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.11-k3s1/go.mod h1:uv2Gaa8WhyS9L9Zx7EUK8aan29bm30xfX3KU/Ch440E=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.11-k3s1 h1:lgN8C+tCOKY84KdWpyoD8zQzCe+YAN1kCCra35ym/JA=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.11-k3s1/go.mod h1:f4FAu7hsUOeHiLm8TTj5sA9RbgPp4cMYiMD7III70Uc=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.11-k3s1 h1:aB69ViRVcyWdXC3H1QkuH08Nk41nXk+9rrNlqOGkxSc=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.11-k3s1/go.mod h1:cBkf5Pgf0kssF+HGNYRkpkOLu2WYWB5OugNuN1DDCTI=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.11-k3s1 h1:0TzRp3voEBlN3i5SLUmvVQtilHK2hCpU0x4zJDwDxSo=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.11-k3s1/go.mod h1:74gMNDhIex44m9pwyDxvegJ/1iwIV+GeY4vnqW+elB0=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.11-k3s1 h1:s4bYZn/8a7JTygFr3epy2omYKQ84W172y6Ev/5veJ+w=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.11-k3s1/go.mod h1:SlYQUXX/Hw1T85y62+sOJfqHQmeWD26skTx0NdA8zH8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.11-k3s1 h1:MkHyiG0k2mqb7L+IQuVpZYfF/eCS+avPJkZo+8MqNVc=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.11-k3s1/go.mod h1:FrrkxvHzedrKyAIOTcAJjBonY3PstmNd+OlcwBg+j3Y=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.11-k3s1 h1:xxRZQ1/NbcN98CuwHAoiblVLMLX7tzkMsE1uwHzFhWY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.11-k3s1/go.mod h1:xy5tBvt9vGo0qIyDI+z0lQRj4FBPmDvVTCkB1vnKg4w=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.11-k3s1 h1:8e++mKD3thvPnmTRJG8JlnzDI6EqH/fXIKZzvHKY+J8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.11-k3s1/go.mod h1:jqwAYW696VyYRvVNjjqC4MYV0N6SiKdfx+nyfPlIwqM=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.11-k3s1 h1:wqqgi+v4JuGdm/+qY+0DYa4ugaFhLIlS+vJeVI2lXrE=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.11-k3s1/go.mod h1:LUej6lcxGM6bqhhyBlQWI+HOHoWYjjphGkZ95geH4zA=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.11-k3s1 h1:7TDr4BjkFWVfz9zmCbIC+/s+pDmFYGoOZ1rt13J3acs=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.11-k3s1/go.mod h1:TYwPJHyAg97PV1XzgDrp/il12tZ5cwWHT6tECzCEwG0=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.11-k3s1 h1:CmWudDSGaYuOajdvxX4cx6pfDEBiLmC0WVk26izl+tE=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.11-k3s1/go.mod h1:tUmOWcMug23gITlfkI8tDjgeDdD7xiNR6ylYS0LavV4=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.11-k3s1 h1:nwnwxKfWP4r0o6S9fMnIkJkzLRZFoRWEbut1CBOorH8=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.11-k3s1/go.mod h1:pM/0ywERZODloMTAJKiUWRfmKBEhCf8oWgzBm1N5M/o=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.11-k3s1/go.mod h1:3PrDeYVltXOLKVrQfe+DdFbJ9HL59vEOq9cob0Twfxo=
github.com/rancher/kubernetes v1.16.13-k3s1 h1:jqiwkowG6bokzvYucdV475vTB3zoynAD4ywsOftIdJs=
github.com/rancher/kubernetes v1.16.13-k3s1/go.mod h1:I1/NbQo7HFnBycd5xkM76qBKCQx90j4t4PMD6MbJ5K0=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.13-k3s1 h1:GS6aHxaoeMeyN4vuPuGW+d3srsgLUC6VREjmleou+LU=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.13-k3s1/go.mod h1:hF711SYP9H3Bqm/pquHb7I9hmYCbyZmz7AZRaXu1rqE=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.13-k3s1 h1:X8Rq5hISSLQ/z/fkBXiiHJhttPQw08f1lr8LX/bHTFA=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.13-k3s1/go.mod h1:VBfb4GYcC+wWE9En7Qiiy2GtEYHyu7+OF4va++HcNEA=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.13-k3s1 h1:DrOpZ2BNQBS95ELh591pmnrCYXBPN6i42Qe2eIoXiyU=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.13-k3s1/go.mod h1:DHoqDukC6AO78SjdpZDs4uxWsr8ELOtSgnpfcmuv3cw=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.13-k3s1 h1:7vs6HlDgUqEXIxYooHxxPV5gfTe6H1oON7GLWkDE4PY=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.13-k3s1/go.mod h1:1HjCjgDtd6sh87IYWH4Mpt6Zucx/CPcRFdw2zxc0Csw=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.13-k3s1 h1:oiRexGObV5lhNBGJhNmrrrJb2Dhf6XTXKFyOLpzN45M=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.13-k3s1/go.mod h1:VpFACNqiYpPtmofpGz77fAfDTDyOnbvU0rJ2CpM40A4=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.13-k3s1 h1:A/9zg8HGH75Os8Y5tRiVLVMqS59YbXVH+8GJy79SH9M=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.13-k3s1/go.mod h1:o1JsPLiZ5bL+KsLEe/wHo65emfcWBAsa0hSpWuMRX80=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.13-k3s1 h1:e9aWVmT2wY6/iCc0TPGZnpmYBwD2hHuD1z+//W7gIPw=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.13-k3s1/go.mod h1:mkOG2NRE3z5O+q3d1mmg3DiltNAqprjQWCJEqS941Sk=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.13-k3s1 h1:YaJYT2v7Ps0Yv2laYdkb/RwnY7Wa2JIhAeFScX2+UJk=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.13-k3s1/go.mod h1:VRJM3GFPwH5SheYrgtmNLDThbGZV36jQbE4KLzoI9bg=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.13-k3s1 h1:3EGI0YkUcav8wwM1Pa8MG0yVLnCRrAKRB97I2oFkNsA=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.13-k3s1/go.mod h1:uv2Gaa8WhyS9L9Zx7EUK8aan29bm30xfX3KU/Ch440E=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.13-k3s1 h1:xN+3UVHIxNk60J7quJMErRe8gOANPFvE+Ou8DepRRb0=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.13-k3s1/go.mod h1:f4FAu7hsUOeHiLm8TTj5sA9RbgPp4cMYiMD7III70Uc=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.13-k3s1 h1:d3kkJj9KxWbsicd96Gej/VOnBgkUaV4P4CJ09wDkszk=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.13-k3s1/go.mod h1:cBkf5Pgf0kssF+HGNYRkpkOLu2WYWB5OugNuN1DDCTI=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.13-k3s1 h1:kg1Em78BsxBEliMrIfsOj82y4ORKfzL0c2sDhSyEvwg=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.13-k3s1/go.mod h1:74gMNDhIex44m9pwyDxvegJ/1iwIV+GeY4vnqW+elB0=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.13-k3s1 h1:hHKZRnXSzpYOCWFqNlc6wLM3hdlBXsJtFIFnl/NVQbk=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.13-k3s1/go.mod h1:SlYQUXX/Hw1T85y62+sOJfqHQmeWD26skTx0NdA8zH8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.13-k3s1 h1:mt6ATAp4BLp20Iz/9TZcgarpHC+/a8n18rXb/cG4+KM=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.13-k3s1/go.mod h1:FrrkxvHzedrKyAIOTcAJjBonY3PstmNd+OlcwBg+j3Y=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.13-k3s1 h1:0pfkTHBM7P6sTFwyC6dBONXYhB8eJvBljiiEAfKrbRE=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.13-k3s1/go.mod h1:xy5tBvt9vGo0qIyDI+z0lQRj4FBPmDvVTCkB1vnKg4w=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.13-k3s1 h1:1FmfeUjtskwLjRNA8TXMPUpoDzF4bH+6SzYouAUhB3s=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.13-k3s1/go.mod h1:jqwAYW696VyYRvVNjjqC4MYV0N6SiKdfx+nyfPlIwqM=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.13-k3s1 h1:Z9wdoU+7bBC+9jdLAm30IFr12MhYElKQ34vgGlePYws=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.13-k3s1/go.mod h1:LUej6lcxGM6bqhhyBlQWI+HOHoWYjjphGkZ95geH4zA=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.13-k3s1 h1:oZacu2U7hinvNvx4NmOfAHrLcgL2PezosmH4jJ2t0fE=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.13-k3s1/go.mod h1:TYwPJHyAg97PV1XzgDrp/il12tZ5cwWHT6tECzCEwG0=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.13-k3s1 h1:UTPsoHOQYVcUg+CtdveNuvGxh0+HtteFPrHpZ6XZatg=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.13-k3s1/go.mod h1:tUmOWcMug23gITlfkI8tDjgeDdD7xiNR6ylYS0LavV4=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.13-k3s1 h1:9nOZ51v0vRehRJbRyUgZIA2OYPLrCPb0PVYoUDZCMB8=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.13-k3s1/go.mod h1:pM/0ywERZODloMTAJKiUWRfmKBEhCf8oWgzBm1N5M/o=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.13-k3s1/go.mod h1:3PrDeYVltXOLKVrQfe+DdFbJ9HL59vEOq9cob0Twfxo=
github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009 h1:Xsxh7fX3+2wAUJtPy8g2lZh0cYuyifqhBL0vxCIYojs=
github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
@@ -431,7 +431,7 @@ redirectLoop:

// Only follow redirects to the same host. Otherwise, propagate the redirect response back.
if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
break redirectLoop
return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname())
}

// Reset the connection.
@@ -298,6 +298,16 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques
rawResponse = headerBytes
}

// If the backend did not upgrade the request, return an error to the client. If the response was
// an error, the error is forwarded directly after the connection is hijacked. Otherwise, just
// return a generic error here.
if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols && backendHTTPResponse.StatusCode < 400 {
err := fmt.Errorf("invalid upgrade response: status code %d", backendHTTPResponse.StatusCode)
klog.Errorf("Proxy upgrade error: %v", err)
h.Responder.Error(w, req, err)
return true
}

// Once the connection is hijacked, the ErrorResponder will no longer work, so
// hijacking should be the last step in the upgrade.
requestHijacker, ok := w.(http.Hijacker)
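For context, a minimal sketch (not part of the commit) of the gate the hunk above adds in tryUpgrade: a backend reply that is neither 101 Switching Protocols nor an error status is rejected before the connection is hijacked, while genuine error responses are still forwarded after the hijack. The helper name below is hypothetical.

package main

import (
	"fmt"
	"net/http"
)

// rejectNonUpgradeResponse mirrors the new check: reject anything that is
// neither a 101 Switching Protocols nor an error (>= 400) response.
func rejectNonUpgradeResponse(status int) bool {
	return status != http.StatusSwitchingProtocols && status < 400
}

func main() {
	fmt.Println(rejectNonUpgradeResponse(http.StatusSwitchingProtocols)) // false: upgrade proceeds
	fmt.Println(rejectNonUpgradeResponse(http.StatusOK))                 // true: client gets "invalid upgrade response: status code 200"
	fmt.Println(rejectNonUpgradeResponse(http.StatusBadGateway))         // false: error body is forwarded after the hijack
}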
@@ -561,7 +561,8 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G
}

genericApiServerHookName := "generic-apiserver-start-informers"
if c.SharedInformerFactory != nil && !s.isPostStartHookRegistered(genericApiServerHookName) {
if c.SharedInformerFactory != nil {
if !s.isPostStartHookRegistered(genericApiServerHookName) {
err := s.AddPostStartHook(genericApiServerHookName, func(context PostStartHookContext) error {
c.SharedInformerFactory.Start(context.StopCh)
return nil

@@ -569,6 +570,12 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G
if err != nil {
return nil, err
}
// TODO: Once we get rid of /healthz consider changing this to post-start-hook.
err = s.addReadyzChecks(healthz.NewInformerSyncHealthz(c.SharedInformerFactory))
if err != nil {
return nil, err
}
}
}

for _, delegateCheck := range delegationTarget.HealthzChecks() {
@@ -27,6 +27,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/httplog"
"k8s.io/client-go/informers"
"k8s.io/klog"
)

@@ -80,6 +81,39 @@ func (l *log) Check(_ *http.Request) error {
return fmt.Errorf("logging blocked")
}

type informerSync struct {
sharedInformerFactory informers.SharedInformerFactory
}

var _ HealthChecker = &informerSync{}

// NewInformerSyncHealthz returns a new HealthChecker that will pass only if all informers in the given sharedInformerFactory sync.
func NewInformerSyncHealthz(sharedInformerFactory informers.SharedInformerFactory) HealthChecker {
return &informerSync{
sharedInformerFactory: sharedInformerFactory,
}
}

func (i *informerSync) Name() string {
return "informer-sync"
}

func (i *informerSync) Check(_ *http.Request) error {
stopCh := make(chan struct{})
// Close stopCh to force checking if informers are synced now.
close(stopCh)

informersByStarted := make(map[bool][]string)
for informerType, started := range i.sharedInformerFactory.WaitForCacheSync(stopCh) {
informersByStarted[started] = append(informersByStarted[started], informerType.String())
}

if notStarted := informersByStarted[false]; len(notStarted) > 0 {
return fmt.Errorf("%d informers not started yet: %v", len(notStarted), notStarted)
}
return nil
}

// NamedCheck returns a healthz checker for the given name and function.
func NamedCheck(name string, check func(r *http.Request) error) HealthChecker {
return &healthzCheck{name, check}
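For context, the trick in Check above is that WaitForCacheSync is called with an already-closed stop channel, so it reports the current sync state immediately instead of blocking. A minimal, self-contained sketch (not part of the commit) against a fake clientset:

package main

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
	factory.Core().V1().Pods().Informer() // register an informer, but never start the factory

	stopCh := make(chan struct{})
	close(stopCh) // closed channel: WaitForCacheSync returns without waiting

	for informerType, synced := range factory.WaitForCacheSync(stopCh) {
		fmt.Printf("%v synced: %v\n", informerType, synced) // prints "... synced: false" here
	}
}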
@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "16"
gitVersion = "v1.16.11-k3s1"
gitCommit = "a2d85ac363941a5280ccd3a1415e38326c843dbe"
gitVersion = "v1.16.13-k3s1"
gitCommit = "0b9525c21107dd05b47060ade21a607549116813"
gitTreeState = "clean"
buildDate = "2020-06-17T18:17:34Z"
buildDate = "2020-07-16T00:35:22Z"
)
@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "16"
gitVersion = "v1.16.11-k3s1"
gitCommit = "a2d85ac363941a5280ccd3a1415e38326c843dbe"
gitVersion = "v1.16.13-k3s1"
gitCommit = "0b9525c21107dd05b47060ade21a607549116813"
gitTreeState = "clean"
buildDate = "2020-06-17T18:17:34Z"
buildDate = "2020-07-16T00:35:22Z"
)
@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "16"
gitVersion = "v1.16.11-k3s1"
gitCommit = "a2d85ac363941a5280ccd3a1415e38326c843dbe"
gitVersion = "v1.16.13-k3s1"
gitCommit = "0b9525c21107dd05b47060ade21a607549116813"
gitTreeState = "clean"
buildDate = "2020-06-17T18:17:34Z"
buildDate = "2020-07-16T00:35:22Z"
)
@@ -4142,7 +4142,7 @@ type PodLogOptions struct {
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
SinceTime *metav1.Time
// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
// If true, add an RFC 3339 timestamp with 9 digits of fractional seconds at the beginning of every line
// of log output.
Timestamps bool
// If set, the number of lines from the end of the logs to show. If not specified,
@@ -53,9 +53,10 @@ func (ds *dockerService) ListContainerStats(ctx context.Context, r *runtimeapi.L
if err != nil {
return nil, err
}

if containerStats != nil {
stats = append(stats, containerStats)
}
}

return &runtimeapi.ListContainerStatsResponse{Stats: stats}, nil
}
@@ -35,7 +35,13 @@ func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.Cont

hcsshim_container, err := hcsshim.OpenContainer(containerID)
if err != nil {
return nil, err
// As we moved from using Docker stats to hcsshim directly, we may query HCS with already exited container IDs.
// That will typically happen with init-containers in Exited state. Docker still knows about them but the HCS does not.
// As we don't want to block stats retrieval for other containers, we only log errors.
if !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyStopped(err) {
klog.Errorf("Error opening container (stats will be missing) '%s': %v", containerID, err)
}
return nil, nil
}
defer func() {
closeErr := hcsshim_container.Close()
@@ -66,6 +66,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
@@ -26,6 +26,7 @@ import (

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"

@@ -90,6 +91,8 @@ type managerImpl struct {
thresholdNotifiers []ThresholdNotifier
// thresholdsLastUpdated is the last time the thresholdNotifiers were updated.
thresholdsLastUpdated time.Time
// etcHostsPath is a function that will get the etc-hosts file's path for a pod given its UID
etcHostsPath func(podUID types.UID) string
}

// ensure it implements the required interface

@@ -106,6 +109,7 @@ func NewManager(
recorder record.EventRecorder,
nodeRef *v1.ObjectReference,
clock clock.Clock,
etcHostsPath func(types.UID) string,
) (Manager, lifecycle.PodAdmitHandler) {
manager := &managerImpl{
clock: clock,

@@ -121,6 +125,7 @@ func NewManager(
thresholdsFirstObservedAt: thresholdsObservedAt{},
dedicatedImageFs: nil,
thresholdNotifiers: []ThresholdNotifier{},
etcHostsPath: etcHostsPath,
}
return manager, manager
}

@@ -503,7 +508,7 @@ func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStat
} else {
fsStatsSet = []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}
}
podEphemeralUsage, err := podLocalEphemeralStorageUsage(podStats, pod, fsStatsSet)
podEphemeralUsage, err := podLocalEphemeralStorageUsage(podStats, pod, fsStatsSet, m.etcHostsPath(pod.UID))
if err != nil {
klog.Errorf("eviction manager: error getting pod disk usage %v", err)
return false
@@ -18,6 +18,7 @@ package eviction

import (
"fmt"
"os"
"sort"
"strconv"
"strings"

@@ -415,7 +416,7 @@ func localEphemeralVolumeNames(pod *v1.Pod) []string {
}

// podLocalEphemeralStorageUsage aggregates pod local ephemeral storage usage and inode consumption for the specified stats to measure.
func podLocalEphemeralStorageUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType) (v1.ResourceList, error) {
func podLocalEphemeralStorageUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType, etcHostsPath string) (v1.ResourceList, error) {
disk := resource.Quantity{Format: resource.BinarySI}
inodes := resource.Quantity{Format: resource.DecimalSI}

@@ -429,6 +430,12 @@ func podLocalEphemeralStorageUsage(podStats statsapi.PodStats, pod *v1.Pod, stat
disk.Add(podLocalVolumeUsageList[v1.ResourceEphemeralStorage])
inodes.Add(podLocalVolumeUsageList[resourceInodes])
}
if len(etcHostsPath) > 0 {
if stat, err := os.Stat(etcHostsPath); err == nil {
disk.Add(*resource.NewQuantity(int64(stat.Size()), resource.BinarySI))
inodes.Add(*resource.NewQuantity(int64(1), resource.DecimalSI))
}
}
return v1.ResourceList{
v1.ResourceEphemeralStorage: disk,
resourceInodes: inodes,
@@ -831,8 +831,9 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity)

etcHostsPathFunc := func(podUID types.UID) string { return getEtcHostsPath(klet.getPodDir(podUID)) }
// setup eviction manager
evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.podManager.GetMirrorPodByPod, klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock)
evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.podManager.GetMirrorPodByPod, klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock, etcHostsPathFunc)

klet.evictionManager = evictionManager
klet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)
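For context, the etcHostsPathFunc wiring above lets the eviction manager charge the kubelet-generated hosts file to the pod's ephemeral-storage usage. A standalone sketch (not part of the commit; the pod directory below is hypothetical) of the accounting step added in podLocalEphemeralStorageUsage:

package main

import (
	"fmt"
	"os"
	"path"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical pod directory; the kubelet derives the real one from the pod UID.
	etcHostsPath := path.Join("/var/lib/kubelet/pods/example-pod-uid", "etc-hosts")

	disk := resource.Quantity{Format: resource.BinarySI}
	if stat, err := os.Stat(etcHostsPath); err == nil {
		// The hosts file lives on the node root filesystem, so its size (plus one
		// inode) now counts toward the pod's local ephemeral storage.
		disk.Add(*resource.NewQuantity(stat.Size(), resource.BinarySI))
	}
	fmt.Println("etc-hosts contribution:", disk.String())
}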
@@ -53,7 +53,7 @@ func (kl *Kubelet) syncNetworkUtil() {
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err)
return
}
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkDropChain, "-j", "MARK", "--set-xmark", dropMark); err != nil {
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkDropChain, "-j", "MARK", "--or-mark", dropMark); err != nil {
klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err)
return
}

@@ -63,7 +63,7 @@ func (kl *Kubelet) syncNetworkUtil() {
}
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain,
"-m", "comment", "--comment", "kubernetes firewall for dropping marked packets",
"-m", "mark", "--mark", dropMark,
"-m", "mark", "--mark", fmt.Sprintf("%s/%s", dropMark, dropMark),
"-j", "DROP"); err != nil {
klog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err)
return

@@ -103,7 +103,7 @@ func (kl *Kubelet) syncNetworkUtil() {
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err)
return
}
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkMasqChain, "-j", "MARK", "--set-xmark", masqueradeMark); err != nil {
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkMasqChain, "-j", "MARK", "--or-mark", masqueradeMark); err != nil {
klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err)
return
}

@@ -112,12 +112,26 @@ func (kl *Kubelet) syncNetworkUtil() {
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err)
return
}
// Establish the masquerading rule.

// Set up KUBE-POSTROUTING to unmark and masquerade marked packets
// NB: THIS MUST MATCH the corresponding code in the iptables and ipvs
// modes of kube-proxy
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain,
"-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", masqueradeMark, masqueradeMark),
"-j", "RETURN"); err != nil {
klog.Errorf("Failed to ensure filtering rule for %v: %v", KubePostroutingChain, err)
return
}
// Clear the mark to avoid re-masquerading if the packet re-traverses the network stack.
// We know the mark bit is currently set so we can use --xor-mark to clear it (without needing
// to Sprintf another bitmask).
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain,
"-j", "MARK", "--xor-mark", masqueradeMark); err != nil {
klog.Errorf("Failed to ensure unmarking rule for %v: %v", KubePostroutingChain, err)
return
}
masqRule := []string{
"-m", "comment", "--comment", "kubernetes service traffic requiring SNAT",
"-m", "mark", "--mark", masqueradeMark,
"-j", "MASQUERADE",
}
if kl.iptClient.HasRandomFully() {

@@ -135,5 +149,5 @@ func (kl *Kubelet) syncNetworkUtil() {
// getIPTablesMark returns the fwmark given the bit
func getIPTablesMark(bit int) string {
value := 1 << uint(bit)
return fmt.Sprintf("%#08x/%#08x", value, value)
return fmt.Sprintf("%#08x", value)
}
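For context, the mark string no longer carries a /mask suffix because the bit is now set with --or-mark, cleared with --xor-mark, and tested with an explicit value/mask match (a bare --mark value would compare the whole 32-bit fwmark). A small sketch (not part of the commit) of the arithmetic, assuming the default masquerade bit 14:

package main

import "fmt"

// getIPTablesMark as in the hunk above: the mark value without a mask suffix.
func getIPTablesMark(bit int) string {
	value := 1 << uint(bit)
	return fmt.Sprintf("%#08x", value)
}

func main() {
	const bit = 14
	value := uint32(1) << bit
	fmt.Println(getIPTablesMark(bit)) // 0x00004000

	var pktMark uint32 = 0x00000003 // pre-existing mark bits on a packet

	pktMark |= value                    // "-j MARK --or-mark": set the bit, keep the rest
	fmt.Printf("%#08x\n", pktMark)      // 0x00004003
	fmt.Println(pktMark&value == value) // "-m mark --mark value/value" tests only this bit: true

	pktMark ^= value               // "-j MARK --xor-mark" in KUBE-POSTROUTING: clear the bit
	fmt.Printf("%#08x\n", pktMark) // 0x00000003
}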
@@ -291,10 +291,15 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M
}
}

// getEtcHostsPath returns the full host-side path to a pod's generated /etc/hosts file
func getEtcHostsPath(podDir string) string {
return path.Join(podDir, "etc-hosts")
}

// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with.
func makeHostsMount(podDir, podIP, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) {
hostsFilePath := path.Join(podDir, "etc-hosts")
hostsFilePath := getEtcHostsPath(podDir)
if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil {
return nil, err
}
@@ -6,6 +6,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/types:go_default_library",
"//pkg/util/tail:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/cri-api/pkg/apis:go_default_library",
@@ -36,6 +36,7 @@ import (
"k8s.io/api/core/v1"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util/tail"
)

@@ -48,8 +49,10 @@ import (
// TODO(random-liu): Support log rotation.

const (
// timeFormat is the time format used in the log.
timeFormat = time.RFC3339Nano
// timeFormatOut is the format for writing timestamps to output.
timeFormatOut = types.RFC3339NanoFixed
// timeFormatIn is the format for parsing timestamps from other logs.
timeFormatIn = types.RFC3339NanoLenient

// stateCheckPeriod is the period to check container state while following
// the container log. Kubelet should not keep following the log when the

@@ -134,9 +137,9 @@ func parseCRILog(log []byte, msg *logMessage) error {
if idx < 0 {
return fmt.Errorf("timestamp is not found")
}
msg.timestamp, err = time.Parse(timeFormat, string(log[:idx]))
msg.timestamp, err = time.Parse(timeFormatIn, string(log[:idx]))
if err != nil {
return fmt.Errorf("unexpected timestamp format %q: %v", timeFormat, err)
return fmt.Errorf("unexpected timestamp format %q: %v", timeFormatIn, err)
}

// Parse stream type

@@ -233,7 +236,7 @@ func (w *logWriter) write(msg *logMessage) error {
}
line := msg.log
if w.opts.timestamp {
prefix := append([]byte(msg.timestamp.Format(timeFormat)), delimiter[0])
prefix := append([]byte(msg.timestamp.Format(timeFormatOut)), delimiter[0])
line = append(prefix, line...)
}
// If the line is longer than the remaining bytes, cut it.
@@ -29,4 +29,9 @@ const (
SystemReservedEnforcementKey = "system-reserved"
KubeReservedEnforcementKey = "kube-reserved"
NodeAllocatableNoneKey = "none"

// fixed width version of time.RFC3339Nano
RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// variable width RFC3339 time format for lenient parsing of strings into timestamps
RFC3339NanoLenient = "2006-01-02T15:04:05.999999999Z07:00"
)
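For context, the fixed layout always emits nine fractional digits so CRI log timestamps have a constant width (and sort correctly as plain strings), while the lenient layout, which equals time.RFC3339Nano, still parses both forms. A small sketch (not part of the commit):

package main

import (
	"fmt"
	"time"
)

const (
	rfc3339NanoFixed   = "2006-01-02T15:04:05.000000000Z07:00" // always 9 fractional digits
	rfc3339NanoLenient = "2006-01-02T15:04:05.999999999Z07:00" // same layout as time.RFC3339Nano
)

func main() {
	t := time.Date(2020, 7, 16, 0, 35, 22, 120000000, time.UTC)

	fmt.Println(t.Format(rfc3339NanoFixed))   // 2020-07-16T00:35:22.120000000Z
	fmt.Println(t.Format(rfc3339NanoLenient)) // 2020-07-16T00:35:22.12Z (trailing zeros dropped)

	// The lenient layout parses both the fixed-width and the trimmed form.
	for _, s := range []string{"2020-07-16T00:35:22.120000000Z", "2020-07-16T00:35:22.12Z"} {
		if _, err := time.Parse(rfc3339NanoLenient, s); err != nil {
			fmt.Println("parse error:", err)
		}
	}
}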
@@ -41,10 +41,10 @@ func NewTimestamp() *Timestamp {
return &Timestamp{time.Now()}
}

// ConvertToTimestamp takes a string, parses it using the RFC3339Nano layout,
// ConvertToTimestamp takes a string, parses it using the RFC3339NanoLenient layout,
// and converts it to a Timestamp object.
func ConvertToTimestamp(timeString string) *Timestamp {
parsed, _ := time.Parse(time.RFC3339Nano, timeString)
parsed, _ := time.Parse(RFC3339NanoLenient, timeString)
return &Timestamp{parsed}
}

@@ -53,10 +53,10 @@ func (t *Timestamp) Get() time.Time {
return t.time
}

// GetString returns the time in the string format using the RFC3339Nano
// GetString returns the time in the string format using the RFC3339NanoFixed
// layout.
func (t *Timestamp) GetString() string {
return t.time.Format(time.RFC3339Nano)
return t.time.Format(RFC3339NanoFixed)
}

// A type to help sort container statuses based on container names.
@@ -581,6 +581,16 @@ func listWithMoreString(list []string, more bool, count, max int) string {
return ret
}

// translateMicroTimestampSince returns the elapsed time since timestamp in
// human-readable approximation.
func translateMicroTimestampSince(timestamp metav1.MicroTime) string {
if timestamp.IsZero() {
return "<unknown>"
}

return duration.HumanDuration(time.Since(timestamp.Time))
}

// translateTimestampSince returns the elapsed time since timestamp in
// human-readable approximation.
func translateTimestampSince(timestamp metav1.Time) string {

@@ -1447,7 +1457,13 @@ func printEvent(obj *api.Event, options printers.GenerateOptions) ([]metav1beta1
}

firstTimestamp := translateTimestampSince(obj.FirstTimestamp)
if obj.FirstTimestamp.IsZero() {
firstTimestamp = translateMicroTimestampSince(obj.EventTime)
}
lastTimestamp := translateTimestampSince(obj.LastTimestamp)
if obj.LastTimestamp.IsZero() {
lastTimestamp = firstTimestamp
}

var target string
if len(obj.InvolvedObject.Name) > 0 {
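For context, translateMicroTimestampSince gives events that only carry eventTime (and no firstTimestamp) a human-readable age, using the same helper that renders other AGE columns. A tiny sketch (not part of the commit) of what duration.HumanDuration produces:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/duration"
)

func main() {
	fmt.Println(duration.HumanDuration(90 * time.Second)) // 90s
	fmt.Println(duration.HumanDuration(25 * time.Minute)) // 25m
	fmt.Println(duration.HumanDuration(3 * time.Hour))    // 3h
}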
@@ -276,7 +276,7 @@ func NewProxier(ipt utiliptables.Interface,

// Generate the masquerade mark to use for SNAT rules.
masqueradeValue := 1 << uint(masqueradeBit)
masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue)

if len(clusterCIDR) == 0 {
klog.Warning("clusterCIDR not specified, unable to distinguish between internal and external traffic")

@@ -780,10 +780,20 @@ func (proxier *Proxier) syncProxyRules() {
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
// NB: THIS MUST MATCH the corresponding code in the kubelet
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
"-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "RETURN",
}...)
// Clear the mark to avoid re-masquerading if the packet re-traverses the network stack.
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
// XOR proxier.masqueradeMark to unset it
"-j", "MARK", "--xor-mark", proxier.masqueradeMark,
}...)
masqRule := []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-j", "MASQUERADE",
}
if proxier.iptables.HasRandomFully() {

@@ -799,7 +809,7 @@ func (proxier *Proxier) syncProxyRules() {
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(KubeMarkMasqChain),
"-j", "MARK", "--set-xmark", proxier.masqueradeMark,
"-j", "MARK", "--or-mark", proxier.masqueradeMark,
}...)

// Accumulate NAT chains to keep.

@@ -1372,7 +1382,7 @@ func (proxier *Proxier) syncProxyRules() {
writeLine(proxier.filterRules,
"-A", string(kubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-m", "mark", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "ACCEPT",
)
@@ -399,7 +399,7 @@ func NewProxier(ipt utiliptables.Interface,

// Generate the masquerade mark to use for SNAT rules.
masqueradeValue := 1 << uint(masqueradeBit)
masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue)

isIPv6 := utilnet.IsIPv6(nodeIP)

@@ -1621,7 +1621,7 @@ func (proxier *Proxier) writeIptablesRules() {
writeLine(proxier.filterRules,
"-A", string(KubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-m", "mark", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "ACCEPT",
)

@@ -1649,6 +1649,39 @@ func (proxier *Proxier) writeIptablesRules() {
)
}

// Install the kubernetes-specific postrouting rules. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
// NB: THIS MUST MATCH the corresponding code in the kubelet
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
"-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "RETURN",
}...)
// Clear the mark to avoid re-masquerading if the packet re-traverses the network stack.
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
// XOR proxier.masqueradeMark to unset it
"-j", "MARK", "--xor-mark", proxier.masqueradeMark,
}...)
masqRule := []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-j", "MASQUERADE",
}
if proxier.iptables.HasRandomFully() {
masqRule = append(masqRule, "--random-fully")
}
writeLine(proxier.natRules, masqRule...)

// Install the kubernetes-specific masquerade mark rule. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(KubeMarkMasqChain),
"-j", "MARK", "--or-mark", proxier.masqueradeMark,
}...)

// Write the end-of-table markers.
writeLine(proxier.filterRules, "COMMIT")
writeLine(proxier.natRules, "COMMIT")

@@ -1707,31 +1740,6 @@ func (proxier *Proxier) createAndLinkeKubeChain() {
}
}

// Install the kubernetes-specific postrouting rules. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
// NB: THIS MUST MATCH the corresponding code in the kubelet
masqRule := []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-j", "MASQUERADE",
}
if proxier.iptables.HasRandomFully() {
masqRule = append(masqRule, "--random-fully")
klog.V(3).Info("Using `--random-fully` in the MASQUERADE rule for iptables")
} else {
klog.V(2).Info("Not using `--random-fully` in the MASQUERADE rule for iptables because the local version of iptables does not support it")
}
writeLine(proxier.natRules, masqRule...)

// Install the kubernetes-specific masquerade mark rule. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(KubeMarkMasqChain),
"-j", "MARK", "--set-xmark", proxier.masqueradeMark,
}...)
}

// getExistingChains get iptables-save output so we can check for existing chains and rules.
@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "16"
gitVersion = "v1.16.11-k3s1"
gitCommit = "a2d85ac363941a5280ccd3a1415e38326c843dbe"
gitVersion = "v1.16.13-k3s1"
gitCommit = "0b9525c21107dd05b47060ade21a607549116813"
gitTreeState = "clean"
buildDate = "2020-06-17T18:17:34Z"
buildDate = "2020-07-16T00:35:22Z"
)
@@ -23,6 +23,7 @@ import (
"strings"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
"github.com/Azure/go-autorest/autorest/to"

"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"

@@ -125,7 +126,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
// found the disk
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
disks[i].ToBeDetached = to.BoolPtr(true)
bFoundDisk = true
break
}
@@ -23,6 +23,7 @@ import (
"strings"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
"github.com/Azure/go-autorest/autorest/to"

"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"

@@ -129,7 +130,7 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
// found the disk
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
disks[i].ToBeDetached = to.BoolPtr(true)
bFoundDisk = true
break
}
@@ -102,6 +102,8 @@ const (
serviceTagKey = "service"
// clusterNameKey is the cluster name key applied for public IP tags.
clusterNameKey = "kubernetes-cluster-name"

defaultLoadBalancerSourceRanges = "0.0.0.0/0"
)

// GetLoadBalancer returns whether the specified load balancer and its components exist, and

@@ -1134,6 +1136,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
if lbIP != nil {
destinationIPAddress = *lbIP
}

if destinationIPAddress == "" {
destinationIPAddress = "*"
}

@@ -1143,6 +1146,12 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
return nil, err
}
serviceTags := getServiceTags(service)
if len(serviceTags) != 0 {
if _, ok := sourceRanges[defaultLoadBalancerSourceRanges]; ok {
delete(sourceRanges, defaultLoadBalancerSourceRanges)
}
}

var sourceAddressPrefixes []string
if (sourceRanges == nil || servicehelpers.IsAllowAll(sourceRanges)) && len(serviceTags) == 0 {
if !requiresInternalLoadBalancer(service) {
@@ -313,6 +313,13 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
return nil, err
}

labels := map[string]string{
v1.LabelZoneRegion: c.Location,
}
// no azure credential is set, return nil
if c.DisksClient == nil {
return labels, nil
}
// Get information of the disk.
ctx, cancel := getContextWithCancel()
defer cancel()

@@ -325,7 +332,7 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
// Check whether availability zone is specified.
if disk.Zones == nil || len(*disk.Zones) == 0 {
klog.V(4).Infof("Azure disk %q is not zoned", diskName)
return nil, nil
return labels, nil
}

zones := *disk.Zones

@@ -336,9 +343,6 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {

zone := c.makeZone(c.Location, zoneID)
klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName)
labels := map[string]string{
v1.LabelZoneRegion: c.Location,
v1.LabelZoneFailureDomain: zone,
}
labels[v1.LabelZoneFailureDomain] = zone
return labels, nil
}
@@ -1130,7 +1130,7 @@ gopkg.in/square/go-jose.v2/jwt
gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v2
# k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.11-k3s1
# k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.13-k3s1
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1

@@ -1172,7 +1172,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.11-k3s1
# k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.13-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install

@@ -1220,7 +1220,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.11-k3s1
# k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.13-k3s1
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta

@@ -1282,7 +1282,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.11-k3s1
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.13-k3s1
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
k8s.io/apiserver/pkg/admission/initializer

@@ -1395,7 +1395,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.11-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.13-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps

@@ -1408,7 +1408,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.11-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.13-k3s1
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached
k8s.io/client-go/discovery/cached/disk

@@ -1594,20 +1594,20 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.11-k3s1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.13-k3s1
k8s.io/cloud-provider
k8s.io/cloud-provider/node/helpers
k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.11-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.13-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.11-k3s1
# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.13-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake

@@ -1622,7 +1622,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.11-k3s1
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.13-k3s1
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag
k8s.io/component-base/config

@@ -1635,10 +1635,10 @@ k8s.io/component-base/metrics/legacyregistry
k8s.io/component-base/metrics/prometheus/restclient
k8s.io/component-base/metrics/prometheus/workqueue
k8s.io/component-base/version
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.11-k3s1
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.13-k3s1
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.11-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.13-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20190822140433-26a664648505

@@ -1653,7 +1653,7 @@ k8s.io/gengo/types
k8s.io/heapster/metrics/api/v1/types
# k8s.io/klog v1.0.0
k8s.io/klog
# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.11-k3s1
# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.13-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1

@@ -1681,7 +1681,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.11-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.13-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31
k8s.io/kube-openapi/pkg/aggregator

@@ -1692,11 +1692,11 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.11-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.13-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.11-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.13-k3s1
k8s.io/kube-scheduler/config/v1alpha1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.11-k3s1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.13-k3s1
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate

@@ -1773,9 +1773,9 @@ k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
k8s.io/kubectl/pkg/version
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.11-k3s1
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.13-k3s1
k8s.io/kubelet/config/v1beta1
# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.11-k3s1
# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.13-k3s1
k8s.io/kubernetes/cmd/cloud-controller-manager/app
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme

@@ -2491,7 +2491,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.11-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.13-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth

@@ -2501,7 +2501,7 @@ k8s.io/legacy-cloud-providers/openstack/util/mount
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.11-k3s1
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.13-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2