Update to Kubernetes v1.19.9 (#3095)

* Update to Kubernetes v1.19.9

Signed-off-by: Jacob Blain Christen <jacob@rancher.com>
pull/3080/head
Jacob Blain Christen 2021-03-18 17:19:47 -07:00 committed by Brian Downs
parent 5dc61aad02
commit ac73524a77
45 changed files with 665 additions and 465 deletions

go.mod

@@ -32,31 +32,31 @@ replace (
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
google.golang.org/grpc => google.golang.org/grpc v1.27.1
gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.8-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.8-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.8-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.8-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.8-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.8-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.8-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.8-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.8-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.8-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.8-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.8-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.8-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.8-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.8-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.8-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.8-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.8-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.19.8-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.8-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.8-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.19.8-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.8-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.8-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.19.8-k3s1
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.9-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.9-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.9-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.9-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.9-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.9-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.9-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.9-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.9-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.9-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.9-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.9-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.9-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.9-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.9-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.9-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.9-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.9-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.19.9-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.9-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.9-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.19.9-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.9-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.9-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.19.9-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
@@ -113,6 +113,6 @@ require (
k8s.io/component-base v0.19.0
k8s.io/cri-api v0.19.0
k8s.io/klog v1.0.0
k8s.io/kubernetes v1.19.8
k8s.io/kubernetes v1.19.9
sigs.k8s.io/yaml v1.2.0
)
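
For readers unfamiliar with the pinning scheme: the replace block above redirects every k8s.io module to the matching k3s-io fork, so bumping a patch release means rewriting each pin from v1.19.8-k3s1 to v1.19.9-k3s1 in lockstep. A minimal sketch of the pattern, with an illustrative module path rather than this repo's real one:

module example.com/k3s-like

go 1.15

require k8s.io/kubernetes v1.19.9

replace k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.19.9-k3s1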

go.sum

@@ -178,7 +178,6 @@ github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -233,7 +232,6 @@ github.com/erikdubbelboer/gspt v0.0.0-20190125194910-e68493906b83/go.mod h1:v6o7
github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY=
github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -366,8 +364,8 @@ github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/cadvisor v0.37.4 h1:xCkylGJJvSini5nSSXrUpNFsbuqBRqAOKS4ckS4uiZw=
github.com/google/cadvisor v0.37.4/go.mod h1:BalYQhwl2UV8lpB3oFssiaW8Uj6sqfFDxw5nEs9sBgU=
github.com/google/cadvisor v0.37.5 h1:7JxmD4TqlGk/B+bsabeBNL3YS+ppVjx74EUnautkVfk=
github.com/google/cadvisor v0.37.5/go.mod h1:BalYQhwl2UV8lpB3oFssiaW8Uj6sqfFDxw5nEs9sBgU=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -376,7 +374,6 @@ github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-containerregistry v0.0.0-20190617215043-876b8855d23c/go.mod h1:yZAFP63pRshzrEYLXLGPmUt0Ay+2zdjmMN1loCnRLUk=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -435,7 +432,6 @@ github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o=
github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@@ -478,49 +474,49 @@ github.com/k3s-io/etcd v0.5.0-alpha.5.0.20201208200253-50621aee4aea h1:7cwby0GoN
github.com/k3s-io/etcd v0.5.0-alpha.5.0.20201208200253-50621aee4aea/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
github.com/k3s-io/helm-controller v0.8.3 h1:GWxavyMz7Bw2ClxH5okkeOL8o5U6IBK7uauc44SDCjU=
github.com/k3s-io/helm-controller v0.8.3/go.mod h1:nZP8FH3KZrNNUf5r+SwwiMR63HS6lxdHdpHijgPfF74=
github.com/k3s-io/kubernetes v1.19.8-k3s1 h1:MoOcLARL41TK95Da/4Xj+y+F7Y5u2VOf6fMZ9RSO7OA=
github.com/k3s-io/kubernetes v1.19.8-k3s1/go.mod h1:6R5K7prywmLDOVGBGd50Ig6lKkgGYaiUT7/r3GZXhsM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.8-k3s1 h1:tyR1urDN8IIyZurUxL8F79adwon2XJ/wYCdLgf2ijOc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.8-k3s1/go.mod h1:Y4VjjNur38HL6/QxaTVK2yno1zjEQlvcvwbbRQs2DtQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.8-k3s1 h1:uRQRe1ND1sveA0134ATPGOmZi00pgZWZBRRMlGStUWQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.8-k3s1/go.mod h1:BvtZU215FgO19Oy19K6h8qwajFfjxYqGewgjuYHWGRw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.8-k3s1 h1:6tFPgWvZbC7iQMqVmFKEOk8BmnD6FJibFHSDgbHPgIU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.8-k3s1/go.mod h1:Cir19TeK3cPorF8tm1BqHUpLE4tYbtgo6ZT/2unWq+A=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.8-k3s1 h1:8zXGveWmCvGzXftrwFCDUuYpjpS6d1lkA2zzqrsAoiI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.8-k3s1/go.mod h1:LD8H+K1jJ1c6p6GKylM8C+tWWwJl15fTCYn4ifLDuj8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.8-k3s1 h1:31STpZgIndRCHuWRPV+xGr07u3Lb2YFV1UR1obX6Mus=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.8-k3s1/go.mod h1:twd45pbv7psOvyGxI8eABhpeoXWW3bCX6aB5NVS6TXs=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.8-k3s1 h1:kMwxys9EEKJNJR3TqwlYiXcdAQG1nhY96LP5mcYnVeA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.8-k3s1/go.mod h1:WByMjeDTwqE4K0WUDPq1kNmBjnOEMxjJm3iG4zQrBmU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.8-k3s1 h1:/+QK3S3l4B37RRJM9M+01LveYB8H1ehohboGv7Emhb4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.8-k3s1/go.mod h1:vXBe7m69RSxIR/m6bm820O+WYUJHP9OXtEyTZf3twRo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.8-k3s1 h1:FTbPKXRkfZkrNRfVUdpFQ2+aOEuxwsS7VXhIBMb2xs0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.8-k3s1/go.mod h1:kEUR4nHaGTacDcHrY2P4IriUdykXxPuwBg7picxa+gk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.8-k3s1 h1:m2KmvYcLd9mYMROvp9uXeKUXQVFtcxyKVu/Ngbkg+eQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.8-k3s1/go.mod h1:ymtOdHaouPTqN4cBy2nJEI9SwiFi4Ht9AEembTam83A=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.8-k3s1 h1:Aoq+z9kBPRrhpZbgsAi9qNsZHiNhcWE10Q1n2WYbGfQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.8-k3s1/go.mod h1:jR+bJp7erYNUmcS7lWDm404aFVonltWE56LV8CuqKyg=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.8-k3s1 h1:ugRBS/suoeXoRv2SQ4Ocor+ifJgQYZ9Zu1TEx2IttIc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.8-k3s1/go.mod h1:t0ujJLDWfxhgYv03mw0mty4oGeEg9WHRxajuBq74nuI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.8-k3s1 h1:BJPMU+Dov6P7zty+MHt1e4ABzcOrZaTKemqs3+h6860=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.8-k3s1/go.mod h1:adA9bBHXD+K7tPn7kTpjQ3VcUzK6PFgAdNEofr4fEx4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.8-k3s1 h1:qSWT8U0wZRs5w54IQGgTbz4CC9VUSbDYBvOHeXMvrxY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.8-k3s1/go.mod h1:ph2Opcv7drLEnBS0p8fBIRdt+SXCQS/7u0oyYILZRtc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.8-k3s1 h1:HitkOkZLSWJi0zol7UcCSTUrebdbHXhsEEpVKXbQa/4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.8-k3s1/go.mod h1:6CzB8GMVD+ZlCXZjHiqccHpC9FFlRTCz+rHd176Lxfc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.8-k3s1 h1:glOM42CzLt9UcvO4ghQhNu/PVPplGMmfmW01BV3sxdM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.8-k3s1/go.mod h1:PWMBqO9xuXWJS8REJ8QWiouJzbiOwVVVT81ZTwYb2Nk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.8-k3s1 h1:P985rLmprTvggWnkelqNPvHvjTJXEpvwzcDu3JcmodU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.8-k3s1/go.mod h1:23iT4D9thFRxYGZ9DOa7zQ4ZyrFHyp+Bz5dXiruHNwI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.8-k3s1 h1:trzv/a1vL+JQpmLeIq0nV5ZCnyEmhv10bFTUhExl5PQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.8-k3s1/go.mod h1:UBqQ69FVXyP8Wpzo+Ni5LUZhwjzJCPSb+6eG8naeDKM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.8-k3s1 h1:9uBE6AkNtPCaw+amght9xjX0JkTEP+7738oFmZphmlc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.8-k3s1/go.mod h1:vixy/pWGmIQu3aFTKuelxNvANNH8fcQCo8K3sbd/CXk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.8-k3s1 h1:RbjP4H312u+i5spDabykELSRGdnfayg7kE/6ccpy+mI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.8-k3s1/go.mod h1:iv4u51XYDkRdyvp7BBP+KuQ+ZHjEjoCECFVzMJBDGZA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.8-k3s1 h1:5MYOaKtgKZpfD6RsZei6DAQDJCeHLVaZbMxOIRf3Cv4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.8-k3s1/go.mod h1:HZHgmugH8oA4ZxTPt9R8HYJBWEeLDegmaGoPo3HDK3I=
github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.8-k3s1/go.mod h1:tl3x2SPSVsIqnioD4z87jXFemilbRh1EYDm3KirMsjI=
github.com/k3s-io/kubernetes v1.19.9-k3s1 h1:rhBnxjiYRkl+3Tx9o5HCJ5FNMtpsrjlQa/tVO2w1iV8=
github.com/k3s-io/kubernetes v1.19.9-k3s1/go.mod h1:rb3bmBEps/XmoUkNbE3H5vsMihMy+Rt5IA1K9KLK4DM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.9-k3s1 h1:l1ZbZx97gNUQfVFu6aK2HI7E8c03MuLnAKgHlllDV4Y=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.9-k3s1/go.mod h1:Y4VjjNur38HL6/QxaTVK2yno1zjEQlvcvwbbRQs2DtQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.9-k3s1 h1:zbpz8WG6tmQfqgFQhRUCGUo31dit/QyF2TQu7taSuZQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.9-k3s1/go.mod h1:BvtZU215FgO19Oy19K6h8qwajFfjxYqGewgjuYHWGRw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.9-k3s1 h1:c3z7UMpLfYg50Hu/m4YIr5Y3oZFzomy6dE3/nJuk5BE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.9-k3s1/go.mod h1:Cir19TeK3cPorF8tm1BqHUpLE4tYbtgo6ZT/2unWq+A=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.9-k3s1 h1:z9ufganb3UMHjEDqkhfB4eYvzQlAjdBMMjL/S7tuveo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.9-k3s1/go.mod h1:8YhdLenhJ1sJ7XDki29w5+V0eeu2Dv4S5/VTunEit3Q=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.9-k3s1 h1:T8SNuErN0Q9rIa3I6qkxeimx9UDAXvBXE2jVcsOflKs=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.9-k3s1/go.mod h1:twd45pbv7psOvyGxI8eABhpeoXWW3bCX6aB5NVS6TXs=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.9-k3s1 h1:aYcJvm356toIv/YIAWLnU2viHwR69GGXtIlzNNdYj6M=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.9-k3s1/go.mod h1:WByMjeDTwqE4K0WUDPq1kNmBjnOEMxjJm3iG4zQrBmU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.9-k3s1 h1:gfEjQ9t43Bfx+DS/eXT70JBFfdjRHcO1kd1HShzhpZs=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.9-k3s1/go.mod h1:vXBe7m69RSxIR/m6bm820O+WYUJHP9OXtEyTZf3twRo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.9-k3s1 h1:Qr/2/XY0iP95d2/AQsd2PqBi4m9iBLuU5OWYJxYmug8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.9-k3s1/go.mod h1:kEUR4nHaGTacDcHrY2P4IriUdykXxPuwBg7picxa+gk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.9-k3s1 h1:GF6mQ/D9ekn0cVuz1RT1z3FD8wtYKCDtraknq8dsYvQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.9-k3s1/go.mod h1:ymtOdHaouPTqN4cBy2nJEI9SwiFi4Ht9AEembTam83A=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.9-k3s1 h1:sV7pvsG+cJicTLEB9l79IC0vsmVS9BLx2TEbSD8U3k8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.9-k3s1/go.mod h1:jR+bJp7erYNUmcS7lWDm404aFVonltWE56LV8CuqKyg=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.9-k3s1 h1:WihIbLZRJ0qsB89VVu+RBrqZKjb06SPjPztpsdtrjvM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.9-k3s1/go.mod h1:t0ujJLDWfxhgYv03mw0mty4oGeEg9WHRxajuBq74nuI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.9-k3s1 h1:3JeO2T1rQ21VeQNuJEqm+6ta9IGlnguAGsWT/VcAon4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.9-k3s1/go.mod h1:adA9bBHXD+K7tPn7kTpjQ3VcUzK6PFgAdNEofr4fEx4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.9-k3s1 h1:Mwka2gDom+thrBmBC8Ba+oYqHtT4rpVpKVbsFfmcuFY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.9-k3s1/go.mod h1:ph2Opcv7drLEnBS0p8fBIRdt+SXCQS/7u0oyYILZRtc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.9-k3s1 h1:6/wPIqoMBl0lr12+DoHa706HYe5TFIzY57KJEe+0TuY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.9-k3s1/go.mod h1:6CzB8GMVD+ZlCXZjHiqccHpC9FFlRTCz+rHd176Lxfc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.9-k3s1 h1:LHtm5obQUQYsAfUIxzwhL7oHGXqsyqekrUIFnTm90XA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.9-k3s1/go.mod h1:PWMBqO9xuXWJS8REJ8QWiouJzbiOwVVVT81ZTwYb2Nk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.9-k3s1 h1:n0IwptXBi1xZ77yJArdKiXBM6SiiEm3UAS3pk2LOeLM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.9-k3s1/go.mod h1:23iT4D9thFRxYGZ9DOa7zQ4ZyrFHyp+Bz5dXiruHNwI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.9-k3s1 h1:o+Tm8IO1syUAdyaZi+bLPA39JKtjaKkHcbRPe+WJZsk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.9-k3s1/go.mod h1:UBqQ69FVXyP8Wpzo+Ni5LUZhwjzJCPSb+6eG8naeDKM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.9-k3s1 h1:O5H6TKQkJD50rhNKVNPWfElga2OSybwaK7CENW/agFU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.9-k3s1/go.mod h1:vixy/pWGmIQu3aFTKuelxNvANNH8fcQCo8K3sbd/CXk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.9-k3s1 h1:W/E16ekGbk7Zwbx2fpJNAIWaotO4641QkVK2p8+XVqQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.9-k3s1/go.mod h1:iv4u51XYDkRdyvp7BBP+KuQ+ZHjEjoCECFVzMJBDGZA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.9-k3s1 h1:UnD6lJTEznwjeZFixUAG0zbz9pRe4EJKA1+MlFgpaZ0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.9-k3s1/go.mod h1:HZHgmugH8oA4ZxTPt9R8HYJBWEeLDegmaGoPo3HDK3I=
github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.9-k3s1/go.mod h1:tl3x2SPSVsIqnioD4z87jXFemilbRh1EYDm3KirMsjI=
github.com/karrick/godirwalk v1.7.5 h1:VbzFqwXwNbAZoA6W5odrLr+hKK197CcENcPh6E/gJ0M=
github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@@ -535,7 +531,6 @@ github.com/knative/pkg v0.0.0-20190514205332-5e4512dcb2ca/go.mod h1:7Ijfhw7rfB+H
github.com/knative/serving v0.6.1/go.mod h1:ljvMfwQy2qanaM/8xnBSK4Mz3Vv2NawC2fo5kFRJS1A=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -770,7 +765,6 @@ github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@@ -1077,8 +1071,8 @@ modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff v0.0.0-20190426204423-ea680f03cc65 h1:xJNnO2qzHtgVCSPoGkkltSpyEX7D7IJw1TmbE3G/7lY=


@@ -29,14 +29,13 @@ import (
"strings"
"time"
"github.com/google/cadvisor/container"
info "github.com/google/cadvisor/info/v1"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/cgroups"
fs2 "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
"k8s.io/klog/v2"
"github.com/google/cadvisor/container"
info "github.com/google/cadvisor/info/v1"
)
var (
@@ -758,16 +757,6 @@ func (h *Handler) GetProcesses() ([]int, error) {
return pids, nil
}
func minUint32(x, y uint32) uint32 {
if x < y {
return x
}
return y
}
// var to allow unit tests to stub it out
var numCpusFunc = getNumberOnlineCPUs
// Convert libcontainer stats to info.ContainerStats.
func setCPUStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
@@ -785,37 +774,7 @@ func setCPUStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
// cpuacct subsystem.
return
}
numPossible := uint32(len(s.CpuStats.CpuUsage.PercpuUsage))
// Note that as of https://patchwork.kernel.org/patch/8607101/ (kernel v4.7),
// the percpu usage information includes extra zero values for all additional
// possible CPUs. This is to allow statistic collection after CPU-hotplug.
// We intentionally ignore these extra zeroes.
numActual, err := numCpusFunc()
if err != nil {
klog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
numActual = numPossible
}
if numActual > numPossible {
// The real number of cores should never be greater than the number of
// datapoints reported in cpu usage.
klog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
}
numActual = minUint32(numPossible, numActual)
ret.Cpu.Usage.PerCpu = make([]uint64, numActual)
for i := uint32(0); i < numActual; i++ {
ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
}
}
func getNumberOnlineCPUs() (uint32, error) {
var availableCPUs unix.CPUSet
if err := unix.SchedGetaffinity(0, &availableCPUs); err != nil {
return 0, err
}
return uint32(availableCPUs.Count()), nil
ret.Cpu.Usage.PerCpu = s.CpuStats.CpuUsage.PercpuUsage
}
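
Worth noting about the setCPUStats hunk above: the online-CPU clamping helpers (minUint32, getNumberOnlineCPUs) are gone, and PerCpu is now the raw PercpuUsage slice, one entry per possible CPU. A self-contained sketch of the behavioral difference, with hypothetical counter values:

package main

import "fmt"

func main() {
	// The kernel reports counters for every possible CPU; offline ones read zero.
	percpuUsage := []uint64{100, 200, 300, 400, 0, 0, 0, 0}
	onlineCPUs := 4

	oldPerCpu := percpuUsage[:onlineCPUs] // previous code clamped to online CPUs
	newPerCpu := percpuUsage              // updated code passes the slice through

	fmt.Println(oldPerCpu) // [100 200 300 400]
	fmt.Println(newPerCpu) // [100 200 300 400 0 0 0 0]
}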
func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {


@@ -383,7 +383,7 @@ func getCoresInfo(sysFs sysfs.SysFs, cpuDirs []string) ([]info.Core, error) {
for _, cpuDir := range cpuDirs {
cpuID, err := getMatchedInt(cpuDirRegExp, cpuDir)
if err != nil {
return nil, fmt.Errorf("Unexpected format of CPU directory, cpuDirRegExp %s, cpuDir: %s", cpuDirRegExp, cpuDir)
return nil, fmt.Errorf("unexpected format of CPU directory, cpuDirRegExp %s, cpuDir: %s", cpuDirRegExp, cpuDir)
}
if !sysFs.IsCPUOnline(cpuDir) {
continue
@@ -401,25 +401,6 @@ func getCoresInfo(sysFs sysfs.SysFs, cpuDirs []string) ([]info.Core, error) {
return nil, err
}
coreIDx := -1
for id, core := range cores {
if core.Id == physicalID {
coreIDx = id
}
}
if coreIDx == -1 {
cores = append(cores, info.Core{})
coreIDx = len(cores) - 1
}
desiredCore := &cores[coreIDx]
desiredCore.Id = physicalID
if len(desiredCore.Threads) == 0 {
desiredCore.Threads = []int{cpuID}
} else {
desiredCore.Threads = append(desiredCore.Threads, cpuID)
}
rawPhysicalPackageID, err := sysFs.GetCPUPhysicalPackageID(cpuDir)
if os.IsNotExist(err) {
klog.Warningf("Cannot read physical package id for %s, physical_package_id file does not exist, err: %s", cpuDir, err)
@@ -432,7 +413,28 @@
if err != nil {
return nil, err
}
coreIDx := -1
for id, core := range cores {
if core.Id == physicalID && core.SocketID == physicalPackageID {
coreIDx = id
}
}
if coreIDx == -1 {
cores = append(cores, info.Core{})
coreIDx = len(cores) - 1
}
desiredCore := &cores[coreIDx]
desiredCore.Id = physicalID
desiredCore.SocketID = physicalPackageID
if len(desiredCore.Threads) == 0 {
desiredCore.Threads = []int{cpuID}
} else {
desiredCore.Threads = append(desiredCore.Threads, cpuID)
}
}
return cores, nil
}
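
The rearranged loop above now matches an existing entry on both core.Id and core.SocketID before appending a thread. Core ids are only unique within a socket, so on a hypothetical two-socket machine where each socket exposes a core 0, the old id-only match would have merged threads from different sockets. A minimal sketch of the composite-key idea:

package main

import "fmt"

func main() {
	type coreKey struct{ coreID, socketID int }
	// cpu -> (core id, physical package id), as read from sysfs.
	cpus := map[int]coreKey{
		0: {0, 0}, 1: {0, 1}, // a core 0 exists on both sockets
		2: {1, 0}, 3: {1, 1},
	}
	threads := map[coreKey][]int{}
	for cpu, key := range cpus {
		threads[key] = append(threads[key], cpu)
	}
	fmt.Println(len(threads)) // 4 distinct cores, not 2
}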


@@ -96,9 +96,11 @@ func SerializeObject(mediaType string, encoder runtime.Encoder, hw http.Response
err := encoder.Encode(object, w)
if err == nil {
err = w.Close()
if err == nil {
return
if err != nil {
// we cannot write an error to the writer anymore as the Encode call was successful.
utilruntime.HandleError(fmt.Errorf("apiserver was unable to close cleanly the response writer: %v", err))
}
return
}
// make a best effort to write the object if a failure is detected
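The SerializeObject change above logs a failed Close via utilruntime.HandleError and returns, instead of falling through to the error-writing path: once Encode has succeeded, the payload has already gone to the client and the error can no longer be surfaced in the response. The same pattern, sketched with a standard-library writer standing in for the response writer:

package main

import (
	"compress/gzip"
	"log"
	"os"
)

func main() {
	w := gzip.NewWriter(os.Stdout)
	if _, err := w.Write([]byte("payload")); err != nil {
		log.Fatal(err)
	}
	// After a successful write, a failed Close cannot change what the
	// client received; logging is the only remaining option.
	if err := w.Close(); err != nil {
		log.Printf("unable to close the response writer cleanly: %v", err)
	}
}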


@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "19"
gitVersion = "v1.19.8-k3s1"
gitCommit = "71b31507f0011b9fd9a4311fabeb17e43d7ff0e6"
gitVersion = "v1.19.9-k3s1"
gitCommit = "9af4aa7bad3f896a07140a9e82fdd8642373c5d8"
gitTreeState = "clean"
buildDate = "2021-02-17T22:28:17Z"
buildDate = "2021-03-18T21:01:32Z"
)
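
For context, these stamped values (gitVersion, gitCommit, buildDate) are normally injected at build time with the Go linker rather than edited by hand. A hypothetical invocation of that pattern follows; the package path and target are illustrative assumptions, not taken from this repo's build scripts:

go build -ldflags "-X k8s.io/component-base/version.gitVersion=v1.19.9-k3s1 -X k8s.io/component-base/version.gitCommit=9af4aa7bad3f896a07140a9e82fdd8642373c5d8" .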


@@ -66,7 +66,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
@@ -249,7 +248,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -288,7 +286,6 @@ golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPj
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -353,7 +350,6 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=


@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "19"
gitVersion = "v1.19.8-k3s1"
gitCommit = "71b31507f0011b9fd9a4311fabeb17e43d7ff0e6"
gitVersion = "v1.19.9-k3s1"
gitCommit = "9af4aa7bad3f896a07140a9e82fdd8642373c5d8"
gitTreeState = "clean"
buildDate = "2021-02-17T22:28:17Z"
buildDate = "2021-03-18T21:01:32Z"
)


@@ -61,7 +61,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
@@ -230,7 +229,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -267,7 +265,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=


@@ -87,11 +87,23 @@ func VisitContainers(podSpec *api.PodSpec, mask ContainerType, visitor Container
// Visitor is called with each object name, and returns true if visiting should continue
type Visitor func(name string) (shouldContinue bool)
func skipEmptyNames(visitor Visitor) Visitor {
return func(name string) bool {
if len(name) == 0 {
// continue visiting
return true
}
// delegate to visitor
return visitor(name)
}
}
// VisitPodSecretNames invokes the visitor function with the name of every secret
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodSecretNames(pod *api.Pod, visitor Visitor, containerType ContainerType) bool {
visitor = skipEmptyNames(visitor)
for _, reference := range pod.Spec.ImagePullSecrets {
if !visitor(reference.Name) {
return false
@@ -180,6 +192,7 @@ func visitContainerSecretNames(container *api.Container, visitor Visitor) bool {
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodConfigmapNames(pod *api.Pod, visitor Visitor, containerType ContainerType) bool {
visitor = skipEmptyNames(visitor)
VisitContainers(&pod.Spec, containerType, func(c *api.Container, containerType ContainerType) bool {
return visitContainerConfigmapNames(c, visitor)
})
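
skipEmptyNames is a plain decorator: it wraps the caller's Visitor so empty names are skipped (visiting continues) while every other name is delegated, which keeps VisitPodSecretNames and VisitPodConfigmapNames from reporting the empty string as a referenced object. A self-contained sketch of the same shape, using local types rather than the k8s ones:

package main

import "fmt"

type visitor func(name string) bool

func skipEmptyNames(v visitor) visitor {
	return func(name string) bool {
		if name == "" {
			return true // ignore empty names but keep visiting
		}
		return v(name)
	}
}

func main() {
	var seen []string
	v := skipEmptyNames(func(n string) bool {
		seen = append(seen, n)
		return true
	})
	for _, n := range []string{"", "db-creds", "", "tls-cert"} {
		if !v(n) {
			break
		}
	}
	fmt.Println(seen) // [db-creds tls-cert]
}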


@@ -30,10 +30,22 @@ func getClaimRefNamespace(pv *corev1.PersistentVolume) string {
// Visitor is called with each object's namespace and name, and returns true if visiting should continue
type Visitor func(namespace, name string, kubeletVisible bool) (shouldContinue bool)
func skipEmptyNames(visitor Visitor) Visitor {
return func(namespace, name string, kubeletVisible bool) bool {
if len(name) == 0 {
// continue visiting
return true
}
// delegate to visitor
return visitor(namespace, name, kubeletVisible)
}
}
// VisitPVSecretNames invokes the visitor function with the name of every secret
// referenced by the PV spec. If visitor returns false, visiting is short-circuited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPVSecretNames(pv *corev1.PersistentVolume, visitor Visitor) bool {
visitor = skipEmptyNames(visitor)
source := &pv.Spec.PersistentVolumeSource
switch {
case source.AzureFile != nil:


@@ -82,6 +82,17 @@ type ContainerVisitor func(container *v1.Container, containerType ContainerType)
// Visitor is called with each object name, and returns true if visiting should continue
type Visitor func(name string) (shouldContinue bool)
func skipEmptyNames(visitor Visitor) Visitor {
return func(name string) bool {
if len(name) == 0 {
// continue visiting
return true
}
// delegate to visitor
return visitor(name)
}
}
// VisitContainers invokes the visitor function with a pointer to every container
// spec in the given pod spec with type set in mask. If visitor returns false,
// visiting is short-circuited. VisitContainers returns true if visiting completes,
@@ -116,6 +127,7 @@ func VisitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerV
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool {
visitor = skipEmptyNames(visitor)
for _, reference := range pod.Spec.ImagePullSecrets {
if !visitor(reference.Name) {
return false
@@ -205,6 +217,7 @@ func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool {
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool {
visitor = skipEmptyNames(visitor)
VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool {
return visitContainerConfigmapNames(c, visitor)
})


@@ -6,6 +6,7 @@ go_library(
"endpointset.go",
"endpointslice_controller.go",
"endpointslice_tracker.go",
"errors.go",
"reconciler.go",
"utils.go",
],


@@ -346,6 +346,10 @@ func (c *Controller) syncService(key string) error {
return err
}
if c.endpointSliceTracker.StaleSlices(service, endpointSlices) {
return &StaleInformerCache{"EndpointSlice informer cache is out of date"}
}
// We call ComputeEndpointLastChangeTriggerTime here to make sure that the
// state of the trigger time tracker gets updated even if the sync turns out
// to be no-op and we don't update the EndpointSlice objects.
@@ -395,7 +399,7 @@ func (c *Controller) onEndpointSliceAdd(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceAdd()"))
return
}
if managedByController(endpointSlice) && c.endpointSliceTracker.Stale(endpointSlice) {
if managedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
@@ -411,7 +415,18 @@
utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceUpdate()"))
return
}
if managedByChanged(prevEndpointSlice, endpointSlice) || (managedByController(endpointSlice) && c.endpointSliceTracker.Stale(endpointSlice)) {
// EndpointSlice generation does not change when labels change. Although the
// controller will never change LabelServiceName, users might. This check
// ensures that we handle changes to this label.
svcName := endpointSlice.Labels[discovery.LabelServiceName]
prevSvcName := prevEndpointSlice.Labels[discovery.LabelServiceName]
if svcName != prevSvcName {
klog.Warningf("%s label changed from %s to %s for %s", discovery.LabelServiceName, prevSvcName, svcName, endpointSlice.Name)
c.queueServiceForEndpointSlice(endpointSlice)
c.queueServiceForEndpointSlice(prevEndpointSlice)
return
}
if managedByChanged(prevEndpointSlice, endpointSlice) || (managedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice)) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
@@ -422,7 +437,11 @@ func (c *Controller) onEndpointSliceUpdate(prevObj, obj interface{}) {
func (c *Controller) onEndpointSliceDelete(obj interface{}) {
endpointSlice := getEndpointSliceFromDeleteAction(obj)
if endpointSlice != nil && managedByController(endpointSlice) && c.endpointSliceTracker.Has(endpointSlice) {
c.queueServiceForEndpointSlice(endpointSlice)
// This returns false if we didn't expect the EndpointSlice to be
// deleted. If that is the case, we queue the Service for another sync.
if !c.endpointSliceTracker.HandleDeletion(endpointSlice) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
}


@@ -19,102 +19,154 @@ package endpointslice
import (
"sync"
"k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1beta1"
"k8s.io/apimachinery/pkg/types"
)
// endpointSliceResourceVersions tracks expected EndpointSlice resource versions
// by EndpointSlice name.
type endpointSliceResourceVersions map[string]string
const (
deletionExpected = -1
)
// endpointSliceTracker tracks EndpointSlices and their associated resource
// versions to help determine if a change to an EndpointSlice has been processed
// by the EndpointSlice controller.
// generationsBySlice tracks expected EndpointSlice generations by EndpointSlice
// uid. A value of deletionExpected (-1) may be used here to indicate that we
// expect this EndpointSlice to be deleted.
type generationsBySlice map[types.UID]int64
// endpointSliceTracker tracks EndpointSlices and their associated generation to
// help determine if a change to an EndpointSlice has been processed by the
// EndpointSlice controller.
type endpointSliceTracker struct {
// lock protects resourceVersionsByService.
// lock protects generationsByService.
lock sync.Mutex
// resourceVersionsByService tracks the list of EndpointSlices and
// associated resource versions expected for a given Service.
resourceVersionsByService map[types.NamespacedName]endpointSliceResourceVersions
// generationsByService tracks the generations of EndpointSlices for each
// Service.
generationsByService map[types.NamespacedName]generationsBySlice
}
// newEndpointSliceTracker creates and initializes a new endpointSliceTracker.
func newEndpointSliceTracker() *endpointSliceTracker {
return &endpointSliceTracker{
resourceVersionsByService: map[types.NamespacedName]endpointSliceResourceVersions{},
generationsByService: map[types.NamespacedName]generationsBySlice{},
}
}
// Has returns true if the endpointSliceTracker has a resource version for the
// Has returns true if the endpointSliceTracker has a generation for the
// provided EndpointSlice.
func (est *endpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
rrv, ok := est.relatedResourceVersions(endpointSlice)
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if !ok {
return false
}
_, ok = rrv[endpointSlice.Name]
_, ok = gfs[endpointSlice.UID]
return ok
}
// Stale returns true if this endpointSliceTracker does not have a resource
// version for the provided EndpointSlice or it does not match the resource
// version of the provided EndpointSlice.
func (est *endpointSliceTracker) Stale(endpointSlice *discovery.EndpointSlice) bool {
// ShouldSync returns true if this endpointSliceTracker does not have a
// generation for the provided EndpointSlice or it is greater than the
// generation of the tracked EndpointSlice.
func (est *endpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
rrv, ok := est.relatedResourceVersions(endpointSlice)
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if !ok {
return true
}
return rrv[endpointSlice.Name] != endpointSlice.ResourceVersion
g, ok := gfs[endpointSlice.UID]
return !ok || endpointSlice.Generation > g
}
// Update adds or updates the resource version in this endpointSliceTracker for
// the provided EndpointSlice.
// StaleSlices returns true if one or more of the provided EndpointSlices
// have older generations than the corresponding tracked ones or if the tracker
// is expecting one or more of the provided EndpointSlices to be deleted.
func (est *endpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
nn := types.NamespacedName{Name: service.Name, Namespace: service.Namespace}
gfs, ok := est.generationsByService[nn]
if !ok {
return false
}
for _, endpointSlice := range endpointSlices {
g, ok := gfs[endpointSlice.UID]
if ok && (g == deletionExpected || g > endpointSlice.Generation) {
return true
}
}
return false
}
// Update adds or updates the generation in this endpointSliceTracker for the
// provided EndpointSlice.
func (est *endpointSliceTracker) Update(endpointSlice *discovery.EndpointSlice) {
est.lock.Lock()
defer est.lock.Unlock()
rrv, ok := est.relatedResourceVersions(endpointSlice)
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if !ok {
rrv = endpointSliceResourceVersions{}
est.resourceVersionsByService[getServiceNN(endpointSlice)] = rrv
gfs = generationsBySlice{}
est.generationsByService[getServiceNN(endpointSlice)] = gfs
}
rrv[endpointSlice.Name] = endpointSlice.ResourceVersion
gfs[endpointSlice.UID] = endpointSlice.Generation
}
// DeleteService removes the set of resource versions tracked for the Service.
// DeleteService removes the set of generations tracked for the Service.
func (est *endpointSliceTracker) DeleteService(namespace, name string) {
est.lock.Lock()
defer est.lock.Unlock()
serviceNN := types.NamespacedName{Name: name, Namespace: namespace}
delete(est.resourceVersionsByService, serviceNN)
delete(est.generationsByService, serviceNN)
}
// Delete removes the resource version in this endpointSliceTracker for the
// provided EndpointSlice.
func (est *endpointSliceTracker) Delete(endpointSlice *discovery.EndpointSlice) {
// ExpectDeletion sets the generation to deletionExpected in this
// endpointSliceTracker for the provided EndpointSlice.
func (est *endpointSliceTracker) ExpectDeletion(endpointSlice *discovery.EndpointSlice) {
est.lock.Lock()
defer est.lock.Unlock()
rrv, ok := est.relatedResourceVersions(endpointSlice)
if ok {
delete(rrv, endpointSlice.Name)
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if !ok {
gfs = generationsBySlice{}
est.generationsByService[getServiceNN(endpointSlice)] = gfs
}
gfs[endpointSlice.UID] = deletionExpected
}
// relatedResourceVersions returns the set of resource versions tracked for the
// Service corresponding to the provided EndpointSlice, and a bool to indicate
// if it exists.
func (est *endpointSliceTracker) relatedResourceVersions(endpointSlice *discovery.EndpointSlice) (endpointSliceResourceVersions, bool) {
// HandleDeletion removes the generation in this endpointSliceTracker for the
// provided EndpointSlice. This returns true if the tracker expected this
// EndpointSlice to be deleted and false if not.
func (est *endpointSliceTracker) HandleDeletion(endpointSlice *discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if ok {
g, ok := gfs[endpointSlice.UID]
delete(gfs, endpointSlice.UID)
if ok && g != deletionExpected {
return false
}
}
return true
}
// generationsForSliceUnsafe returns the generations for the Service
// corresponding to the provided EndpointSlice, and a bool to indicate if it
// exists. A lock must be applied before calling this function.
func (est *endpointSliceTracker) generationsForSliceUnsafe(endpointSlice *discovery.EndpointSlice) (generationsBySlice, bool) {
serviceNN := getServiceNN(endpointSlice)
vers, ok := est.resourceVersionsByService[serviceNN]
return vers, ok
generations, ok := est.generationsByService[serviceNN]
return generations, ok
}
// getServiceNN returns a namespaced name for the Service corresponding to the

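
The gist of the tracker rewrite above: resource versions are opaque strings that only support an equality check, while metadata.Generation is an ordered integer, so keying by UID and comparing generations lets the controller tell an informer cache lagging behind its own write apart from a slice that has genuinely changed since. A small illustrative sketch of that ordering logic, with hypothetical values:

package main

import "fmt"

func main() {
	trackedGeneration := int64(4) // generation the controller last wrote
	observed := []int64{3, 4, 5}  // generations later seen via the informer

	for _, g := range observed {
		staleCache := g < trackedGeneration // informer still lagging our write
		needsSync := g > trackedGeneration  // slice changed after our write
		fmt.Println(g, "stale cache:", staleCache, "should sync:", needsSync)
	}
}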

@@ -0,0 +1,30 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslice
// StaleInformerCache errors indicate that the informer cache includes out of
// date resources.
type StaleInformerCache struct {
msg string
}
func (e *StaleInformerCache) Error() string { return e.msg }
func isStaleInformerCacheErr(err error) bool {
_, ok := err.(*StaleInformerCache)
return ok
}
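
The new StaleInformerCache type gives sync callers a way to treat a lagging cache as a retryable condition rather than a hard failure. A sketch of how a caller might branch on it, with local stand-ins for the vendored types and an illustrative requeue decision (not the actual controller wiring):

package main

import (
	"errors"
	"fmt"
)

// Local stand-in mirroring the vendored type above.
type StaleInformerCache struct{ msg string }

func (e *StaleInformerCache) Error() string { return e.msg }

func syncService(key string) error {
	return &StaleInformerCache{"EndpointSlice informer cache is out of date"}
}

func main() {
	if err := syncService("default/my-service"); err != nil {
		var stale *StaleInformerCache
		if errors.As(err, &stale) {
			// A lagging cache is eventually consistent: requeue and retry
			// instead of reporting a hard failure.
			fmt.Println("requeue:", err)
			return
		}
		fmt.Println("hard failure:", err)
	}
}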


@@ -239,7 +239,7 @@ func (r *reconciler) finalize(
if err != nil {
return fmt.Errorf("failed to delete %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
}
r.endpointSliceTracker.Delete(endpointSlice)
r.endpointSliceTracker.ExpectDeletion(endpointSlice)
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
}


@@ -6,6 +6,7 @@ go_library(
"endpointset.go",
"endpointslice_tracker.go",
"endpointslicemirroring_controller.go",
"errors.go",
"events.go",
"reconciler.go",
"reconciler_helpers.go",


@@ -19,102 +19,154 @@ package endpointslicemirroring
import (
"sync"
"k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1beta1"
"k8s.io/apimachinery/pkg/types"
)
// endpointSliceResourceVersions tracks expected EndpointSlice resource versions
// by EndpointSlice name.
type endpointSliceResourceVersions map[string]string
const (
deletionExpected = -1
)
// endpointSliceTracker tracks EndpointSlices and their associated resource
// versions to help determine if a change to an EndpointSlice has been processed
// by the EndpointSlice controller.
// generationsBySlice tracks expected EndpointSlice generations by EndpointSlice
// uid. A value of deletionExpected (-1) may be used here to indicate that we
// expect this EndpointSlice to be deleted.
type generationsBySlice map[types.UID]int64
// endpointSliceTracker tracks EndpointSlices and their associated generation to
// help determine if a change to an EndpointSlice has been processed by the
// EndpointSlice controller.
type endpointSliceTracker struct {
// lock protects resourceVersionsByService.
// lock protects generationsByService.
lock sync.Mutex
// resourceVersionsByService tracks the list of EndpointSlices and
// associated resource versions expected for a given Service.
resourceVersionsByService map[types.NamespacedName]endpointSliceResourceVersions
// generationsByService tracks the generations of EndpointSlices for each
// Service.
generationsByService map[types.NamespacedName]generationsBySlice
}
// newEndpointSliceTracker creates and initializes a new endpointSliceTracker.
func newEndpointSliceTracker() *endpointSliceTracker {
return &endpointSliceTracker{
resourceVersionsByService: map[types.NamespacedName]endpointSliceResourceVersions{},
generationsByService: map[types.NamespacedName]generationsBySlice{},
}
}
// Has returns true if the endpointSliceTracker has a resource version for the
// Has returns true if the endpointSliceTracker has a generation for the
// provided EndpointSlice.
func (est *endpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
rrv, ok := est.relatedResourceVersions(endpointSlice)
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if !ok {
return false
}
_, ok = rrv[endpointSlice.Name]
_, ok = gfs[endpointSlice.UID]
return ok
}
// Stale returns true if this endpointSliceTracker does not have a resource
// version for the provided EndpointSlice or it does not match the resource
// version of the provided EndpointSlice.
func (est *endpointSliceTracker) Stale(endpointSlice *discovery.EndpointSlice) bool {
// ShouldSync returns true if this endpointSliceTracker does not have a
// generation for the provided EndpointSlice or it is greater than the
// generation of the tracked EndpointSlice.
func (est *endpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
rrv, ok := est.relatedResourceVersions(endpointSlice)
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if !ok {
return true
}
return rrv[endpointSlice.Name] != endpointSlice.ResourceVersion
g, ok := gfs[endpointSlice.UID]
return !ok || endpointSlice.Generation > g
}
// Update adds or updates the resource version in this endpointSliceTracker for
// the provided EndpointSlice.
// StaleSlices returns true if one or more of the provided EndpointSlices
// have older generations than the corresponding tracked ones or if the tracker
// is expecting one or more of the provided EndpointSlices to be deleted.
func (est *endpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
nn := types.NamespacedName{Name: service.Name, Namespace: service.Namespace}
gfs, ok := est.generationsByService[nn]
if !ok {
return false
}
for _, endpointSlice := range endpointSlices {
g, ok := gfs[endpointSlice.UID]
if ok && (g == deletionExpected || g > endpointSlice.Generation) {
return true
}
}
return false
}
// Update adds or updates the generation in this endpointSliceTracker for the
// provided EndpointSlice.
func (est *endpointSliceTracker) Update(endpointSlice *discovery.EndpointSlice) {
est.lock.Lock()
defer est.lock.Unlock()
rrv, ok := est.relatedResourceVersions(endpointSlice)
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if !ok {
rrv = endpointSliceResourceVersions{}
est.resourceVersionsByService[getServiceNN(endpointSlice)] = rrv
gfs = generationsBySlice{}
est.generationsByService[getServiceNN(endpointSlice)] = gfs
}
rrv[endpointSlice.Name] = endpointSlice.ResourceVersion
gfs[endpointSlice.UID] = endpointSlice.Generation
}
// DeleteService removes the set of resource versions tracked for the Service.
// DeleteService removes the set of generations tracked for the Service.
func (est *endpointSliceTracker) DeleteService(namespace, name string) {
est.lock.Lock()
defer est.lock.Unlock()
serviceNN := types.NamespacedName{Name: name, Namespace: namespace}
delete(est.resourceVersionsByService, serviceNN)
delete(est.generationsByService, serviceNN)
}
// Delete removes the resource version in this endpointSliceTracker for the
// provided EndpointSlice.
func (est *endpointSliceTracker) Delete(endpointSlice *discovery.EndpointSlice) {
// ExpectDeletion sets the generation to deletionExpected in this
// endpointSliceTracker for the provided EndpointSlice.
func (est *endpointSliceTracker) ExpectDeletion(endpointSlice *discovery.EndpointSlice) {
est.lock.Lock()
defer est.lock.Unlock()
rrv, ok := est.relatedResourceVersions(endpointSlice)
if ok {
delete(rrv, endpointSlice.Name)
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if !ok {
gfs = generationsBySlice{}
est.generationsByService[getServiceNN(endpointSlice)] = gfs
}
gfs[endpointSlice.UID] = deletionExpected
}
// relatedResourceVersions returns the set of resource versions tracked for the
// Service corresponding to the provided EndpointSlice, and a bool to indicate
// if it exists.
func (est *endpointSliceTracker) relatedResourceVersions(endpointSlice *discovery.EndpointSlice) (endpointSliceResourceVersions, bool) {
// HandleDeletion removes the generation in this endpointSliceTracker for the
// provided EndpointSlice. This returns true if the tracker expected this
// EndpointSlice to be deleted and false if not.
func (est *endpointSliceTracker) HandleDeletion(endpointSlice *discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
if ok {
g, ok := gfs[endpointSlice.UID]
delete(gfs, endpointSlice.UID)
if ok && g != deletionExpected {
return false
}
}
return true
}
// generationsForSliceUnsafe returns the generations for the Service
// corresponding to the provided EndpointSlice, and a bool to indicate if it
// exists. A lock must be applied before calling this function.
func (est *endpointSliceTracker) generationsForSliceUnsafe(endpointSlice *discovery.EndpointSlice) (generationsBySlice, bool) {
serviceNN := getServiceNN(endpointSlice)
vers, ok := est.resourceVersionsByService[serviceNN]
return vers, ok
generations, ok := est.generationsByService[serviceNN]
return generations, ok
}
// getServiceNN returns a namespaced name for the Service corresponding to the
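For illustration, a minimal runnable sketch of the generation-based staleness rule the new tracker applies (all types below are local stand-ins, not the controller's exported API; deletionExpected is assumed to be a -1 sentinel):

package main

import "fmt"

// deletionExpected is a sentinel generation (assumed -1 here) marking a
// slice whose deletion the tracker is waiting for.
const deletionExpected = int64(-1)

// generationsBySlice maps an EndpointSlice UID to its last-seen generation.
type generationsBySlice map[string]int64

// stale mirrors the per-slice check in StaleSlices: a slice is stale if its
// deletion is expected or the tracker has recorded a newer generation.
func stale(gfs generationsBySlice, uid string, observed int64) bool {
	g, ok := gfs[uid]
	return ok && (g == deletionExpected || g > observed)
}

func main() {
	gfs := generationsBySlice{"slice-a": 3}
	fmt.Println(stale(gfs, "slice-a", 2)) // true: the informer cache is behind
	fmt.Println(stale(gfs, "slice-a", 3)) // false: the cache is current
	gfs["slice-a"] = deletionExpected
	fmt.Println(stale(gfs, "slice-a", 3)) // true: deletion is expected
}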


@ -316,6 +316,10 @@ func (c *Controller) syncEndpoints(key string) error {
return err
}
if c.endpointSliceTracker.StaleSlices(svc, endpointSlices) {
return &StaleInformerCache{"EndpointSlice informer cache is out of date"}
}
err = c.reconciler.reconcile(endpoints, endpointSlices)
if err != nil {
return err
@ -439,7 +443,7 @@ func (c *Controller) onEndpointSliceAdd(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("onEndpointSliceAdd() expected type discovery.EndpointSlice, got %T", obj))
return
}
if managedByController(endpointSlice) && c.endpointSliceTracker.Stale(endpointSlice) {
if managedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice) {
c.queueEndpointsForEndpointSlice(endpointSlice)
}
}
@ -455,7 +459,18 @@ func (c *Controller) onEndpointSliceUpdate(prevObj, obj interface{}) {
utilruntime.HandleError(fmt.Errorf("onEndpointSliceUpdated() expected type discovery.EndpointSlice, got %T, %T", prevObj, obj))
return
}
if managedByChanged(prevEndpointSlice, endpointSlice) || (managedByController(endpointSlice) && c.endpointSliceTracker.Stale(endpointSlice)) {
// EndpointSlice generation does not change when labels change. Although the
// controller will never change LabelServiceName, users might. This check
// ensures that we handle changes to this label.
svcName := endpointSlice.Labels[discovery.LabelServiceName]
prevSvcName := prevEndpointSlice.Labels[discovery.LabelServiceName]
if svcName != prevSvcName {
klog.Warningf("%s label changed from %s to %s for %s", discovery.LabelServiceName, prevSvcName, svcName, endpointSlice.Name)
c.queueEndpointsForEndpointSlice(endpointSlice)
c.queueEndpointsForEndpointSlice(prevEndpointSlice)
return
}
if managedByChanged(prevEndpointSlice, endpointSlice) || (managedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice)) {
c.queueEndpointsForEndpointSlice(endpointSlice)
}
}
@ -470,7 +485,11 @@ func (c *Controller) onEndpointSliceDelete(obj interface{}) {
return
}
if managedByController(endpointSlice) && c.endpointSliceTracker.Has(endpointSlice) {
c.queueEndpointsForEndpointSlice(endpointSlice)
// This returns false if we didn't expect the EndpointSlice to be
// deleted. If that is the case, we queue the Service for another sync.
if !c.endpointSliceTracker.HandleDeletion(endpointSlice) {
c.queueEndpointsForEndpointSlice(endpointSlice)
}
}
}


@ -0,0 +1,25 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslicemirroring
// StaleInformerCache errors indicate that the informer cache includes
// out-of-date resources.
type StaleInformerCache struct {
msg string
}
func (e *StaleInformerCache) Error() string { return e.msg }
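A hedged usage sketch: a caller can detect this error type with errors.As and requeue instead of treating the sync as a hard failure (the wrapping and the requeue comment below are assumptions, not the controller's literal code):

package main

import (
	"errors"
	"fmt"
)

// StaleInformerCache mirrors the error type added above.
type StaleInformerCache struct {
	msg string
}

func (e *StaleInformerCache) Error() string { return e.msg }

func main() {
	err := fmt.Errorf("sync failed: %w",
		&StaleInformerCache{"EndpointSlice informer cache is out of date"})
	var stale *StaleInformerCache
	if errors.As(err, &stale) {
		// A retryable condition: requeue and wait for the cache to catch up.
		fmt.Println("requeue:", stale.msg)
	}
}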


@ -263,7 +263,7 @@ func (r *reconciler) finalize(endpoints *corev1.Endpoints, slices slicesByAction
if err != nil {
return fmt.Errorf("failed to delete %s EndpointSlice for Endpoints %s/%s: %v", endpointSlice.Name, endpoints.Namespace, endpoints.Name, err)
}
r.endpointSliceTracker.Delete(endpointSlice)
r.endpointSliceTracker.ExpectDeletion(endpointSlice)
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
}


@ -1477,13 +1477,13 @@ func (nc *Controller) markNodeForTainting(node *v1.Node, status v1.ConditionStat
defer nc.evictorLock.Unlock()
if status == v1.ConditionFalse {
if !taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].SetRemove(node.Name)
nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name)
}
}
if status == v1.ConditionUnknown {
if !taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].SetRemove(node.Name)
nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name)
}
}


@ -194,15 +194,6 @@ func (q *UniqueQueue) Clear() {
}
}
// SetRemove remove value from the set if value existed
func (q *UniqueQueue) SetRemove(value string) {
q.lock.Lock()
defer q.lock.Unlock()
if q.set.Has(value) {
q.set.Delete(value)
}
}
// RateLimitedTimedQueue is a unique item priority queue ordered by
// the expected next time of execution. It is also rate limited.
type RateLimitedTimedQueue struct {
@ -289,11 +280,6 @@ func (q *RateLimitedTimedQueue) Clear() {
q.queue.Clear()
}
// SetRemove remove value from the set of the queue
func (q *RateLimitedTimedQueue) SetRemove(value string) {
q.queue.SetRemove(value)
}
// SwapLimiter safely swaps current limiter for this queue with the
// passed one if capacities or qps's differ.
func (q *RateLimitedTimedQueue) SwapLimiter(newQPS float32) {
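A minimal sketch of why the tainter above switched from SetRemove to Remove: SetRemove cleared only the membership set, leaving a stale entry in the underlying queue, whereas Remove (the surviving method, mirrored here with local stand-in types) clears both:

package main

import "fmt"

// uniqueQueue stands in for the controller's UniqueQueue: a FIFO of node
// names plus a set guaranteeing each name is enqueued at most once.
type uniqueQueue struct {
	queue []string
	set   map[string]bool
}

// setRemove mirrors the deleted method: it forgets set membership but
// leaves the stale entry in the queue.
func (q *uniqueQueue) setRemove(v string) { delete(q.set, v) }

// remove mirrors the method callers now use: it clears both structures.
func (q *uniqueQueue) remove(v string) {
	delete(q.set, v)
	for i, item := range q.queue {
		if item == v {
			q.queue = append(q.queue[:i], q.queue[i+1:]...)
			return
		}
	}
}

func main() {
	q := &uniqueQueue{queue: []string{"node-a"}, set: map[string]bool{"node-a": true}}
	q.setRemove("node-a")
	fmt.Println(len(q.queue)) // 1: a stale entry lingers in the queue

	q = &uniqueQueue{queue: []string{"node-a"}, set: map[string]bool{"node-a": true}}
	q.remove("node-a")
	fmt.Println(len(q.queue)) // 0: fully removed
}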


@ -56,7 +56,6 @@ go_library(
"//pkg/apis/core/v1/helper/qos:go_default_library",
"//pkg/kubelet/cadvisor:go_default_library",
"//pkg/kubelet/cm/containermap:go_default_library",
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
"//pkg/kubelet/cm/devicemanager:go_default_library",
"//pkg/kubelet/cm/util:go_default_library",
"//pkg/kubelet/events:go_default_library",
@ -119,7 +118,6 @@ go_library(
"//pkg/apis/core/v1/helper/qos:go_default_library",
"//pkg/kubelet/cadvisor:go_default_library",
"//pkg/kubelet/cm/containermap:go_default_library",
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
"//pkg/kubelet/cm/devicemanager:go_default_library",
"//pkg/kubelet/cm/util:go_default_library",
"//pkg/kubelet/events:go_default_library",


@ -53,7 +53,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
cputopology "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
@ -239,13 +238,6 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
if err != nil {
return nil, err
}
// Correct NUMA information is currently missing from cadvisor's
// MachineInfo struct, so we use the CPUManager's internal logic for
// gathering NUMANodeInfo to pass to components that care about it.
numaNodeInfo, err := cputopology.GetNUMANodeInfo()
if err != nil {
return nil, err
}
capacity := cadvisor.CapacityFromMachineInfo(machineInfo)
for k, v := range capacity {
internalCapacity[k] = v
@ -313,7 +305,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManager) {
cm.topologyManager, err = topologymanager.NewManager(
numaNodeInfo,
machineInfo.Topology,
nodeConfig.ExperimentalTopologyManagerPolicy,
)
@ -328,7 +320,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
klog.Infof("Creating device plugin manager: %t", devicePluginEnabled)
if devicePluginEnabled {
cm.deviceManager, err = devicemanager.NewManagerImpl(numaNodeInfo, cm.topologyManager)
cm.deviceManager, err = devicemanager.NewManagerImpl(machineInfo.Topology, cm.topologyManager)
cm.topologyManager.AddHintProvider(cm.deviceManager)
} else {
cm.deviceManager, err = devicemanager.NewManagerStub()
@ -343,7 +335,6 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
nodeConfig.ExperimentalCPUManagerPolicy,
nodeConfig.ExperimentalCPUManagerReconcilePeriod,
machineInfo,
numaNodeInfo,
nodeConfig.NodeAllocatableConfig.ReservedSystemCPUs,
cm.GetNodeAllocatableReservation(),
nodeConfig.KubeletRootDir,


@ -126,7 +126,7 @@ func (s *sourcesReadyStub) AddSource(source string) {}
func (s *sourcesReadyStub) AllReady() bool { return true }
// NewManager creates a new CPU manager based on the provided policy
func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, numaNodeInfo topology.NUMANodeInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
var topo *topology.CPUTopology
var policy Policy
@ -137,7 +137,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
case PolicyStatic:
var err error
topo, err = topology.Discover(machineInfo, numaNodeInfo)
topo, err = topology.Discover(machineInfo)
if err != nil {
return nil, err
}


@ -364,24 +364,18 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v
func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reusableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint {
// Initialize minAffinitySize to include all NUMA Nodes.
minAffinitySize := p.topology.CPUDetails.NUMANodes().Size()
// Initialize minSocketsOnMinAffinity to include all Sockets.
minSocketsOnMinAffinity := p.topology.CPUDetails.Sockets().Size()
// Iterate through all combinations of socket bitmask and build hints from them.
// Iterate through all combinations of NUMA node bitmasks and build hints from them.
hints := []topologymanager.TopologyHint{}
bitmask.IterateBitMasks(p.topology.CPUDetails.NUMANodes().ToSlice(), func(mask bitmask.BitMask) {
// First, update minAffinitySize and minSocketsOnMinAffinity for the
// current request size.
// First, update minAffinitySize for the current request size.
cpusInMask := p.topology.CPUDetails.CPUsInNUMANodes(mask.GetBits()...).Size()
socketsInMask := p.topology.CPUDetails.SocketsInNUMANodes(mask.GetBits()...).Size()
if cpusInMask >= request && mask.Count() < minAffinitySize {
minAffinitySize = mask.Count()
if socketsInMask < minSocketsOnMinAffinity {
minSocketsOnMinAffinity = socketsInMask
}
}
// Then check to see if all of the reusable CPUs are part of the bitmask.
// Then check to see if we have enough CPUs available on the current
// NUMA node bitmask to satisfy the CPU request.
numMatching := 0
for _, c := range reusableCPUs.ToSlice() {
// Disregard this mask if its NUMANode isn't part of it.
@ -404,7 +398,7 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
return
}
// Otherwise, create a new hint from the socket bitmask and add it to the
// Otherwise, create a new hint from the NUMA node bitmask and add it to the
// list of hints. We set all hint preferences to 'false' on the first
// pass through.
hints = append(hints, topologymanager.TopologyHint{
@ -416,14 +410,10 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
// Loop back through all hints and update the 'Preferred' field based on
// counting the number of bits sets in the affinity mask and comparing it
// to the minAffinitySize. Only those with an equal number of bits set (and
// with a minimal set of sockets) will be considered preferred.
// with a minimal set of NUMA nodes) will be considered preferred.
for i := range hints {
if hints[i].NUMANodeAffinity.Count() == minAffinitySize {
nodes := hints[i].NUMANodeAffinity.GetBits()
numSockets := p.topology.CPUDetails.SocketsInNUMANodes(nodes...).Size()
if numSockets == minSocketsOnMinAffinity {
hints[i].Preferred = true
}
hints[i].Preferred = true
}
}
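A small sketch of the simplified preference rule (affinity masks reduced to plain ID slices; the real code uses topologymanager bitmasks): a hint is now preferred exactly when its NUMA-node mask is of minimal size, with no socket tie-break:

package main

import "fmt"

type hint struct {
	numaNodes []int // NUMA node IDs in the affinity mask
	preferred bool
}

// markPreferred applies the new rule: minimal mask size alone decides
// preference; the old minimal-socket condition is gone.
func markPreferred(hints []hint, minAffinitySize int) {
	for i := range hints {
		if len(hints[i].numaNodes) == minAffinitySize {
			hints[i].preferred = true
		}
	}
}

func main() {
	hints := []hint{
		{numaNodes: []int{0}},
		{numaNodes: []int{1}},
		{numaNodes: []int{0, 1}},
	}
	markPreferred(hints, 1)
	for _, h := range hints {
		fmt.Println(h.numaNodes, h.preferred) // single-node masks are preferred
	}
}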


@ -33,8 +33,5 @@ go_test(
name = "go_default_test",
srcs = ["topology_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/cm/cpuset:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
],
deps = ["//vendor/github.com/google/cadvisor/info/v1:go_default_library"],
)


@ -18,8 +18,6 @@ package topology
import (
"fmt"
"io/ioutil"
"strings"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/klog/v2"
@ -218,7 +216,7 @@ func (d CPUDetails) CPUsInCores(ids ...int) cpuset.CPUSet {
}
// Discover returns CPUTopology based on cadvisor node info
func Discover(machineInfo *cadvisorapi.MachineInfo, numaNodeInfo NUMANodeInfo) (*CPUTopology, error) {
func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) {
if machineInfo.NumCores == 0 {
return nil, fmt.Errorf("could not detect number of cpus")
}
@ -226,26 +224,20 @@ func Discover(machineInfo *cadvisorapi.MachineInfo, numaNodeInfo NUMANodeInfo) (
CPUDetails := CPUDetails{}
numPhysicalCores := 0
for _, socket := range machineInfo.Topology {
numPhysicalCores += len(socket.Cores)
for _, core := range socket.Cores {
for _, node := range machineInfo.Topology {
numPhysicalCores += len(node.Cores)
for _, core := range node.Cores {
if coreID, err := getUniqueCoreID(core.Threads); err == nil {
for _, cpu := range core.Threads {
numaNodeID := 0
for id, cset := range numaNodeInfo {
if cset.Contains(cpu) {
numaNodeID = id
}
}
CPUDetails[cpu] = CPUInfo{
CoreID: coreID,
SocketID: socket.Id,
NUMANodeID: numaNodeID,
SocketID: core.SocketID,
NUMANodeID: node.Id,
}
}
} else {
klog.Errorf("could not get unique coreID for socket: %d core %d threads: %v",
socket.Id, core.Id, core.Threads)
core.SocketID, core.Id, core.Threads)
return nil, err
}
}
@ -253,7 +245,7 @@ func Discover(machineInfo *cadvisorapi.MachineInfo, numaNodeInfo NUMANodeInfo) (
return &CPUTopology{
NumCPUs: machineInfo.NumCores,
NumSockets: len(machineInfo.Topology),
NumSockets: machineInfo.NumSockets,
NumCores: numPhysicalCores,
CPUDetails: CPUDetails,
}, nil
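To illustrate the new mapping, a self-contained sketch with local stand-ins for the cadvisor v1 types: the NUMA node ID now comes straight from the topology entry and the socket ID from each core, instead of being reverse-looked-up from sysfs:

package main

import "fmt"

type core struct {
	id       int
	socketID int
	threads  []int
}

type node struct {
	id    int
	cores []core
}

type cpuInfo struct{ coreID, socketID, numaNodeID int }

func main() {
	// Two NUMA nodes, one core each, two hardware threads per core.
	topology := []node{
		{id: 0, cores: []core{{id: 0, socketID: 0, threads: []int{0, 2}}}},
		{id: 1, cores: []core{{id: 1, socketID: 1, threads: []int{1, 3}}}},
	}
	details := map[int]cpuInfo{}
	for _, n := range topology {
		for _, c := range n.cores {
			for _, cpu := range c.threads {
				details[cpu] = cpuInfo{coreID: c.id, socketID: c.socketID, numaNodeID: n.id}
			}
		}
	}
	fmt.Println(details[3]) // {1 1 1}: thread 3 sits on core 1, socket 1, NUMA node 1
}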
@ -280,49 +272,3 @@ func getUniqueCoreID(threads []int) (coreID int, err error) {
return min, nil
}
// GetNUMANodeInfo uses sysfs to return a map of NUMANode id to the list of
// CPUs associated with that NUMANode.
//
// TODO: This is a temporary workaround until cadvisor provides this
// information directly in machineInfo. We should remove this once this
// information is available from cadvisor.
func GetNUMANodeInfo() (NUMANodeInfo, error) {
// Get the possible NUMA nodes on this machine. If reading this file
// is not possible, this is not an error. Instead, we just return a
// nil NUMANodeInfo, indicating that no NUMA information is available
// on this machine. This should implicitly be interpreted as having a
// single NUMA node with id 0 for all CPUs.
nodelist, err := ioutil.ReadFile("/sys/devices/system/node/online")
if err != nil {
return nil, nil
}
// Parse the nodelist into a set of Node IDs
nodes, err := cpuset.Parse(strings.TrimSpace(string(nodelist)))
if err != nil {
return nil, err
}
info := make(NUMANodeInfo)
// For each node...
for _, node := range nodes.ToSlice() {
// Read the 'cpulist' of the NUMA node from sysfs.
path := fmt.Sprintf("/sys/devices/system/node/node%d/cpulist", node)
cpulist, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
// Convert the 'cpulist' into a set of CPUs.
cpus, err := cpuset.Parse(strings.TrimSpace(string(cpulist)))
if err != nil {
return nil, err
}
info[node] = cpus
}
return info, nil
}


@ -19,7 +19,6 @@ go_library(
"//pkg/kubelet/apis/podresources/v1alpha1:go_default_library",
"//pkg/kubelet/checkpointmanager:go_default_library",
"//pkg/kubelet/checkpointmanager/errors:go_default_library",
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
"//pkg/kubelet/cm/devicemanager/checkpoint:go_default_library",
"//pkg/kubelet/cm/topologymanager:go_default_library",
"//pkg/kubelet/cm/topologymanager/bitmask:go_default_library",
@ -37,6 +36,7 @@ go_library(
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1:go_default_library",
"//staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],


@ -26,6 +26,7 @@ import (
"sync"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"google.golang.org/grpc"
"k8s.io/klog/v2"
@ -40,7 +41,6 @@ import (
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
cputopology "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
@ -124,11 +124,11 @@ func (s *sourcesReadyStub) AddSource(source string) {}
func (s *sourcesReadyStub) AllReady() bool { return true }
// NewManagerImpl creates a new manager.
func NewManagerImpl(numaNodeInfo cputopology.NUMANodeInfo, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
return newManagerImpl(pluginapi.KubeletSocket, numaNodeInfo, topologyAffinityStore)
func NewManagerImpl(topology []cadvisorapi.Node, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
return newManagerImpl(pluginapi.KubeletSocket, topology, topologyAffinityStore)
}
func newManagerImpl(socketPath string, numaNodeInfo cputopology.NUMANodeInfo, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
func newManagerImpl(socketPath string, topology []cadvisorapi.Node, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
klog.V(2).Infof("Creating Device Plugin manager at %s", socketPath)
if socketPath == "" || !filepath.IsAbs(socketPath) {
@ -136,8 +136,8 @@ func newManagerImpl(socketPath string, numaNodeInfo cputopology.NUMANodeInfo, to
}
var numaNodes []int
for node := range numaNodeInfo {
numaNodes = append(numaNodes, node)
for _, node := range topology {
numaNodes = append(numaNodes, node.Id)
}
dir, file := filepath.Split(socketPath)


@ -14,10 +14,10 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
"//pkg/kubelet/cm/topologymanager/bitmask:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)


@ -20,9 +20,9 @@ import (
"fmt"
"sync"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
cputopology "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)
@ -122,12 +122,12 @@ func (th *TopologyHint) LessThan(other TopologyHint) bool {
var _ Manager = &manager{}
//NewManager creates a new TopologyManager based on provided policy
func NewManager(numaNodeInfo cputopology.NUMANodeInfo, topologyPolicyName string) (Manager, error) {
func NewManager(topology []cadvisorapi.Node, topologyPolicyName string) (Manager, error) {
klog.Infof("[topologymanager] Creating topology manager with %s policy", topologyPolicyName)
var numaNodes []int
for node := range numaNodeInfo {
numaNodes = append(numaNodes, node)
for _, node := range topology {
numaNodes = append(numaNodes, node.Id)
}
if topologyPolicyName != PolicyNone && len(numaNodes) > maxAllowableNUMANodes {


@ -368,17 +368,46 @@ func (kl *Kubelet) getMountedVolumePathListFromDisk(podUID types.UID) ([]string,
return mountedVolumes, nil
}
// podVolumesSubpathsDirExists returns true if the pod volume-subpaths directory for
// a given pod exists
func (kl *Kubelet) podVolumeSubpathsDirExists(podUID types.UID) (bool, error) {
podVolDir := kl.getPodVolumeSubpathsDir(podUID)
// getPodVolumeSubpathListFromDisk returns a list of the volume-subpath paths by reading the
// subpath directories for the given pod from the disk.
func (kl *Kubelet) getPodVolumeSubpathListFromDisk(podUID types.UID) ([]string, error) {
volumes := []string{}
podSubpathsDir := kl.getPodVolumeSubpathsDir(podUID)
if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil {
return true, fmt.Errorf("error checking if path %q exists: %v", podVolDir, pathErr)
if pathExists, pathErr := mount.PathExists(podSubpathsDir); pathErr != nil {
return nil, fmt.Errorf("error checking if path %q exists: %v", podSubpathsDir, pathErr)
} else if !pathExists {
return false, nil
return volumes, nil
}
return true, nil
// Explicitly walks /<volume>/<container name>/<subPathIndex>
volumePluginDirs, err := ioutil.ReadDir(podSubpathsDir)
if err != nil {
klog.Errorf("Could not read directory %s: %v", podSubpathsDir, err)
return volumes, err
}
for _, volumePluginDir := range volumePluginDirs {
volumePluginName := volumePluginDir.Name()
volumePluginPath := filepath.Join(podSubpathsDir, volumePluginName)
containerDirs, err := ioutil.ReadDir(volumePluginPath)
if err != nil {
return volumes, fmt.Errorf("could not read directory %s: %v", volumePluginPath, err)
}
for _, containerDir := range containerDirs {
containerName := containerDir.Name()
containerPath := filepath.Join(volumePluginPath, containerName)
// Switch to ReadDirNoStat at the subPathIndex level to prevent issues with stat'ing
// mount points that may not be responsive
subPaths, err := utilpath.ReadDirNoStat(containerPath)
if err != nil {
return volumes, fmt.Errorf("could not read directory %s: %v", containerPath, err)
}
for _, subPathDir := range subPaths {
volumes = append(volumes, filepath.Join(containerPath, subPathDir))
}
}
}
return volumes, nil
}
// GetRequestedContainersInfo returns container info.


@ -18,6 +18,7 @@ package kubelet
import (
"fmt"
"syscall"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -120,25 +121,49 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid)
continue
}
// If there are still volume directories, do not delete directory
allVolumesCleanedUp := true
// If there are still volume directories, attempt to rmdir them
volumePaths, err := kl.getPodVolumePathListFromDisk(uid)
if err != nil {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err))
continue
}
if len(volumePaths) > 0 {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but volume paths are still present on disk", uid))
continue
for _, volumePath := range volumePaths {
if err := syscall.Rmdir(volumePath); err != nil {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() volume at path %v: %v", uid, volumePath, err))
allVolumesCleanedUp = false
} else {
klog.Warningf("Cleaned up orphaned volume from pod %q at %s", uid, volumePath)
}
}
}
// If there are any volume-subpaths, do not cleanup directories
volumeSubpathExists, err := kl.podVolumeSubpathsDirExists(uid)
// If there are any volume-subpaths, attempt to rmdir them
subpathVolumePaths, err := kl.getPodVolumeSubpathListFromDisk(uid)
if err != nil {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err))
continue
}
if volumeSubpathExists {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but volume subpaths are still present on disk", uid))
if len(subpathVolumePaths) > 0 {
for _, subpathVolumePath := range subpathVolumePaths {
if err := syscall.Rmdir(subpathVolumePath); err != nil {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() subpath at path %v: %v", uid, subpathVolumePath, err))
allVolumesCleanedUp = false
} else {
klog.Warningf("Cleaned up orphaned volume subpath from pod %q at %s", uid, subpathVolumePath)
}
}
}
if !allVolumesCleanedUp {
// Not all volumes were removed, so don't clean up the pod directory yet. It is likely
// that there are still mountpoints left which could stall RemoveAllOneFilesystem which
// would otherwise be called below.
// Errors for all removal operations have already been recorded, so don't add another
// one here.
continue
}


@ -160,6 +160,12 @@ func (w *Watcher) handleCreateEvent(event fsnotify.Event) error {
klog.V(6).Infof("Handling create event: %v", event)
fi, err := os.Stat(event.Name)
// TODO: This is a workaround for a Windows 20H2 issue with os.Stat(). Please see
// microsoft/Windows-Containers#97 for details.
// Once the issue is resolved, the following os.Lstat() is not needed.
if err != nil && runtime.GOOS == "windows" {
fi, err = os.Lstat(event.Name)
}
if err != nil {
return fmt.Errorf("stat file %s failed: %v", event.Name, err)
}


@ -46,6 +46,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/features:go_default_library",
"//pkg/quota/v1:go_default_library",
"//pkg/quota/v1/generic:go_default_library",
"//pkg/util/node:go_default_library",
@ -54,6 +55,8 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
],
)


@ -30,10 +30,12 @@ import (
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/util/feature"
api "k8s.io/kubernetes/pkg/apis/core"
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
quota "k8s.io/kubernetes/pkg/quota/v1"
"k8s.io/kubernetes/pkg/quota/v1/generic"
)
@ -351,6 +353,10 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
limits = quota.Max(limits, pod.Spec.InitContainers[i].Resources.Limits)
}
if feature.DefaultFeatureGate.Enabled(features.PodOverhead) {
requests = quota.Add(requests, pod.Spec.Overhead)
limits = quota.Add(limits, pod.Spec.Overhead)
}
result = quota.Add(result, podComputeUsageHelper(requests, limits))
return result, nil
}
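The quota arithmetic, sketched with plain millicore integers (the real code operates on corev1.ResourceList via quota.Add and quota.Max): with the PodOverhead feature gate on, pod.Spec.Overhead is charged on top of the max of the summed app containers and each init container:

package main

import "fmt"

func main() {
	containersCPU := int64(500) // sum over app containers, in millicores
	initCPU := int64(200)       // max over init containers
	overheadCPU := int64(250)   // pod.Spec.Overhead, e.g. for a sandboxed runtime

	requests := containersCPU
	if initCPU > requests {
		requests = initCPU
	}
	requests += overheadCPU // the new PodOverhead contribution
	fmt.Printf("requests.cpu charged to quota: %dm\n", requests) // 750m
}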


@ -313,13 +313,8 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
volDataKey.volHandle: csiSource.VolumeHandle,
volDataKey.driverName: csiSource.Driver,
}
if err = saveVolumeData(dataDir, volDataFileName, data); err != nil {
klog.Error(log("failed to save volume info data: %v", err))
if cleanErr := os.RemoveAll(dataDir); cleanErr != nil {
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanErr))
}
return err
}
err = saveVolumeData(dataDir, volDataFileName, data)
defer func() {
// Only if there was an error and volume operation was considered
// finished, we should remove the directory.
@ -332,6 +327,12 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
}
}()
if err != nil {
errMsg := log("failed to save volume info data: %v", err)
klog.Error(errMsg)
return errors.New(errMsg)
}
if !stageUnstageSet {
klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.


@ -42,6 +42,7 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
const (
@ -439,11 +440,23 @@ func (p *csiPlugin) NewMounter(
attachID := getAttachmentName(volumeHandle, driverName, node)
volData[volDataKey.attachmentID] = attachID
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
if removeErr := os.RemoveAll(dataDir); removeErr != nil {
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, removeErr))
err = saveVolumeData(dataDir, volDataFileName, volData)
defer func() {
// Only if there was an error and volume operation was considered
// finished, we should remove the directory.
if err != nil && volumetypes.IsOperationFinishedError(err) {
// attempt to cleanup volume mount dir.
if err = removeMountDir(p, dir); err != nil {
klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", dir, err))
}
}
return nil, errors.New(log("failed to save volume info data: %v", err))
}()
if err != nil {
errorMsg := log("csi.NewMounter failed to save volume info data: %v", err)
klog.Error(errorMsg)
return nil, errors.New(errorMsg)
}
klog.V(4).Info(log("mounter created successfully"))
@ -684,11 +697,21 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt
volDataKey.attachmentID: attachID,
}
if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil {
if removeErr := os.RemoveAll(dataDir); removeErr != nil {
klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, removeErr))
err = saveVolumeData(dataDir, volDataFileName, volData)
defer func() {
// Only if there was an error and volume operation was considered
// finished, we should remove the directory.
if err != nil && volumetypes.IsOperationFinishedError(err) {
// attempt to cleanup volume mount dir.
if err = removeMountDir(p, dataDir); err != nil {
klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", dataDir, err))
}
}
return nil, errors.New(log("failed to save volume info data: %v", err))
}()
if err != nil {
errorMsg := log("csi.NewBlockVolumeMapper failed to save volume info data: %v", err)
klog.Error(errorMsg)
return nil, errors.New(errorMsg)
}
return mapper, nil
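A runnable sketch of the pattern shared by MountDevice, NewMounter, and NewBlockVolumeMapper above (isOperationFinishedError stands in for volumetypes.IsOperationFinishedError): the metadata directory is removed only when the volume operation has genuinely finished with an error, so an in-flight operation can retry against the saved data:

package main

import (
	"errors"
	"fmt"
	"os"
)

var errFinished = errors.New("operation finished with error")

// isOperationFinishedError stands in for volumetypes.IsOperationFinishedError.
func isOperationFinishedError(err error) bool { return errors.Is(err, errFinished) }

// saveMetadata mimics the deferred-cleanup shape of the updated CSI code.
func saveMetadata(dataDir string, fail error) (err error) {
	defer func() {
		// Only if there was an error and the operation is considered
		// finished do we remove the directory.
		if err != nil && isOperationFinishedError(err) {
			os.RemoveAll(dataDir)
		}
	}()
	return fail
}

func main() {
	dir, _ := os.MkdirTemp("", "voldata-*")
	defer os.RemoveAll(dir)

	saveMetadata(dir, errors.New("transient")) // directory kept for a retry
	_, statErr := os.Stat(dir)
	fmt.Println("kept after transient error:", statErr == nil) // true

	saveMetadata(dir, errFinished) // directory removed
	_, statErr = os.Stat(dir)
	fmt.Println("removed after finished error:", os.IsNotExist(statErr)) // true
}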


@ -75,7 +75,7 @@ func getUpperPath(path string) string {
// Check whether a directory/file is a link type or not
// LinkType could be SymbolicLink, Junction, or HardLink
func isLinkPath(path string) (bool, error) {
cmd := fmt.Sprintf("(Get-Item -Path %s).LinkType", path)
cmd := fmt.Sprintf("(Get-Item -LiteralPath %q).LinkType", path)
output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
if err != nil {
return false, err
@ -113,7 +113,7 @@ func evalSymlink(path string) (string, error) {
}
}
// This command will give the target path of a given symlink
cmd := fmt.Sprintf("(Get-Item -Path %s).Target", upperpath)
cmd := fmt.Sprintf("(Get-Item -LiteralPath %q).Target", upperpath)
output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
if err != nil {
return "", err
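Why the switch to -LiteralPath with a %q-quoted argument matters, in a short sketch (the path is hypothetical): %s splices the raw path into the PowerShell command, so a space splits it into separate arguments, while %q emits a quoted string literal and -LiteralPath additionally suppresses wildcard interpretation:

package main

import "fmt"

func main() {
	path := `C:\Program Files\some link`
	// Old form: the unquoted path splits on the space when PowerShell parses it.
	broken := fmt.Sprintf("(Get-Item -Path %s).LinkType", path)
	// New form: %q yields a quoted, escaped string literal.
	fixed := fmt.Sprintf("(Get-Item -LiteralPath %q).LinkType", path)
	fmt.Println(broken)
	fmt.Println(fixed)
}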

vendor/modules.txt

@ -545,7 +545,7 @@ github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
# github.com/google/btree v1.0.0
github.com/google/btree
# github.com/google/cadvisor v0.37.4
# github.com/google/cadvisor v0.37.5
github.com/google/cadvisor/accelerators
github.com/google/cadvisor/cache/memory
github.com/google/cadvisor/collector
@ -1365,7 +1365,7 @@ gopkg.in/warnings.v0
gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
gopkg.in/yaml.v3
# k8s.io/api v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.8-k3s1
# k8s.io/api v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.9-k3s1
## explicit
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
@ -1411,7 +1411,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.8-k3s1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.9-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@ -1451,7 +1451,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.8-k3s1
# k8s.io/apimachinery v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.9-k3s1
## explicit
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
@ -1515,7 +1515,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.8-k3s1
# k8s.io/apiserver v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.9-k3s1
## explicit
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
@ -1643,7 +1643,7 @@ k8s.io/apiserver/plugin/pkg/audit/webhook
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.8-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.9-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@ -1656,7 +1656,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.8-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.9-k3s1
## explicit
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached
@ -1895,7 +1895,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.8-k3s1
# k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.9-k3s1
## explicit
k8s.io/cloud-provider
k8s.io/cloud-provider/api
@ -1908,13 +1908,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.8-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.9-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.8-k3s1
# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.9-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake
@ -1929,7 +1929,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.8-k3s1
# k8s.io/component-base v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.9-k3s1
## explicit
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag
@ -1953,11 +1953,11 @@ k8s.io/component-base/metrics/testutil
k8s.io/component-base/term
k8s.io/component-base/version
k8s.io/component-base/version/verflag
# k8s.io/cri-api v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.8-k3s1
# k8s.io/cri-api v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.9-k3s1
## explicit
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.8-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.9-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14
@ -1975,7 +1975,7 @@ k8s.io/heapster/metrics/api/v1/types
k8s.io/klog
# k8s.io/klog/v2 v2.2.0
k8s.io/klog/v2
# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.8-k3s1
# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.9-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@ -2003,7 +2003,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.8-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.9-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6
k8s.io/kube-openapi/pkg/aggregator
@ -2014,13 +2014,13 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.8-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.9-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.8-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.9-k3s1
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1beta1
k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.8-k3s1
# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.9-k3s1
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate
@ -2096,11 +2096,11 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.8-k3s1
# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.9-k3s1
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
k8s.io/kubelet/pkg/apis/pluginregistration/v1
# k8s.io/kubernetes v1.19.8 => github.com/k3s-io/kubernetes v1.19.8-k3s1
# k8s.io/kubernetes v1.19.9 => github.com/k3s-io/kubernetes v1.19.9-k3s1
## explicit
k8s.io/kubernetes/cmd/cloud-controller-manager/app
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
@ -2839,7 +2839,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.8-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.9-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth
@ -2881,7 +2881,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.8-k3s1
# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.9-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
@ -2912,7 +2912,7 @@ k8s.io/utils/path
k8s.io/utils/pointer
k8s.io/utils/strings
k8s.io/utils/trace
# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9
# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
# sigs.k8s.io/kustomize v2.0.3+incompatible
@ -2978,29 +2978,29 @@ vbom.ml/util/sortorder
# google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
# google.golang.org/grpc => google.golang.org/grpc v1.27.1
# gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.8-k3s1
# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.8-k3s1
# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.8-k3s1
# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.8-k3s1
# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.8-k3s1
# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.8-k3s1
# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.8-k3s1
# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.8-k3s1
# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.8-k3s1
# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.8-k3s1
# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.8-k3s1
# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.8-k3s1
# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.8-k3s1
# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.8-k3s1
# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.8-k3s1
# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.8-k3s1
# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.8-k3s1
# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.8-k3s1
# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.19.8-k3s1
# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.8-k3s1
# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.8-k3s1
# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.19.8-k3s1
# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.8-k3s1
# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.8-k3s1
# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.19.8-k3s1
# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.9-k3s1
# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.9-k3s1
# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.9-k3s1
# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.9-k3s1
# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.9-k3s1
# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.9-k3s1
# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.9-k3s1
# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.9-k3s1
# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.9-k3s1
# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.9-k3s1
# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.9-k3s1
# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.9-k3s1
# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.9-k3s1
# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.9-k3s1
# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.9-k3s1
# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.9-k3s1
# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.9-k3s1
# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.9-k3s1
# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.19.9-k3s1
# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.9-k3s1
# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.9-k3s1
# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.19.9-k3s1
# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.9-k3s1
# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.9-k3s1
# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.19.9-k3s1
# mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34


@ -51,6 +51,12 @@ type grpcTunnel struct {
connsLock sync.RWMutex
}
type clientConn interface {
Close() error
}
var _ clientConn = &grpc.ClientConn{}
// CreateSingleUseGrpcTunnel creates a Tunnel for dialing a remote server through a
// gRPC-based proxy service.
// Currently, a single tunnel supports a single connection, and the tunnel is closed when the connection is terminated.
@ -79,7 +85,7 @@ func CreateSingleUseGrpcTunnel(address string, opts ...grpc.DialOption) (Tunnel,
return tunnel, nil
}
func (t *grpcTunnel) serve(c *grpc.ClientConn) {
func (t *grpcTunnel) serve(c clientConn) {
defer c.Close()
for {
@ -88,11 +94,11 @@ func (t *grpcTunnel) serve(c *grpc.ClientConn) {
return
}
if err != nil || pkt == nil {
klog.Warningf("stream read error: %v", err)
klog.ErrorS(err, "stream read failure")
return
}
klog.V(6).Infof("[tracing] recv packet, type: %s", pkt.Type)
klog.V(5).InfoS("[tracing] recv packet", "type", pkt.Type)
switch pkt.Type {
case client.PacketType_DIAL_RSP:
@ -102,13 +108,19 @@ func (t *grpcTunnel) serve(c *grpc.ClientConn) {
t.pendingDialLock.RUnlock()
if !ok {
klog.Warning("DialResp not recognized; dropped")
klog.V(1).Infoln("DialResp not recognized; dropped")
} else {
ch <- dialResult{
err: resp.Error,
connid: resp.ConnectID,
}
}
if resp.Error != "" {
// On dial error, avoid leaking serve goroutine.
return
}
case client.PacketType_DATA:
resp := pkt.GetData()
// TODO: flow control
@ -119,7 +131,7 @@ func (t *grpcTunnel) serve(c *grpc.ClientConn) {
if ok {
conn.readCh <- resp.Data
} else {
klog.Warningf("connection id %d not recognized", resp.ConnectID)
klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID)
}
case client.PacketType_CLOSE_RSP:
resp := pkt.GetCloseResponse()
@ -136,7 +148,7 @@ func (t *grpcTunnel) serve(c *grpc.ClientConn) {
t.connsLock.Unlock()
return
}
klog.Warningf("connection id %d not recognized", resp.ConnectID)
klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID)
}
}
}
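The new clientConn interface exists so serve can be exercised without a live gRPC connection; a minimal sketch with a fake (test-style, assumed) implementation:

package main

import "fmt"

// clientConn mirrors the interface introduced above; *grpc.ClientConn
// satisfies it in the real package.
type clientConn interface {
	Close() error
}

type fakeConn struct{ closed bool }

func (f *fakeConn) Close() error { f.closed = true; return nil }

// serve stands in for grpcTunnel.serve: it only needs Close on exit.
func serve(c clientConn) {
	defer c.Close()
	// ... read packets from the stream until it ends ...
}

func main() {
	f := &fakeConn{}
	serve(f)
	fmt.Println("connection closed:", f.closed) // true
}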
@ -169,14 +181,14 @@ func (t *grpcTunnel) Dial(protocol, address string) (net.Conn, error) {
},
},
}
klog.V(6).Infof("[tracing] send packet, type: %s", req.Type)
klog.V(5).InfoS("[tracing] send packet", "type", req.Type)
err := t.stream.Send(req)
if err != nil {
return nil, err
}
klog.Info("DIAL_REQ sent to proxy server")
klog.V(5).Infoln("DIAL_REQ sent to proxy server")
c := &conn{stream: t.stream}


@ -54,7 +54,7 @@ func (c *conn) Write(data []byte) (n int, err error) {
},
}
klog.V(6).Infof("[tracing] send req, type: %s", req.Type)
klog.V(5).InfoS("[tracing] send req", "type", req.Type)
err = c.stream.Send(req)
if err != nil {
@ -112,7 +112,7 @@ func (c *conn) SetWriteDeadline(t time.Time) error {
// Close closes the connection. It also sends CLOSE_REQ packet over
// proxy service to notify remote to drop the connection.
func (c *conn) Close() error {
klog.Info("conn.Close()")
klog.V(4).Infoln("closing connection")
req := &client.Packet{
Type: client.PacketType_CLOSE_REQ,
Payload: &client.Packet_CloseRequest{
@ -122,7 +122,7 @@ func (c *conn) Close() error {
},
}
klog.V(6).Infof("[tracing] send req, type: %s", req.Type)
klog.V(5).InfoS("[tracing] send req", "type", req.Type)
if err := c.stream.Send(req); err != nil {
return err