[WIP] v1.18.9-k3s1

Signed-off-by: MonzElmasry <menna.elmasry@rancher.com>
pull/2268/head
MonzElmasry 2020-09-17 00:40:19 +02:00
parent b9542ef001
commit 44139e60f5
GPG Key ID: 7E8A4F7221E39452
70 changed files with 645 additions and 432 deletions

go.mod

@@ -33,31 +33,31 @@ replace (
 	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.8-k3s1
+	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.9-k3s1
-	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.8-k3s1
+	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.9-k3s1
-	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.8-k3s1
+	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.9-k3s1
-	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.8-k3s1
+	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.9-k3s1
-	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.8-k3s1
+	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.9-k3s1
-	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.8-k3s1
+	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.9-k3s1
-	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.8-k3s1
+	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.9-k3s1
-	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.8-k3s1
+	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.9-k3s1
-	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.8-k3s1
+	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.9-k3s1
-	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.8-k3s1
+	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.9-k3s1
-	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.8-k3s1
+	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.9-k3s1
-	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.8-k3s1
+	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.9-k3s1
-	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.8-k3s1
+	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.9-k3s1
-	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.8-k3s1
+	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.9-k3s1
-	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.8-k3s1
+	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.9-k3s1
-	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.8-k3s1
+	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.9-k3s1
-	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.8-k3s1
+	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.9-k3s1
-	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.8-k3s1
+	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.9-k3s1
-	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.8-k3s1
+	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.9-k3s1
-	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.8-k3s1
+	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.9-k3s1
-	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.8-k3s1
+	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.9-k3s1
-	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.8-k3s1
+	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.9-k3s1
-	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.8-k3s1
+	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.9-k3s1
-	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.8-k3s1
+	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.9-k3s1
-	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.8-k3s1
+	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.9-k3s1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 )
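All of the k8s.io modules are pinned to the matching tag of Rancher's kubernetes fork through these `replace` directives. As a side note, a generic way to confirm which replacements actually took effect in a compiled binary is the standard library's build info; this is a quick sketch, not part of this PR:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

// Prints every module in the build that was redirected by a replace
// directive, e.g. k8s.io/client-go => github.com/rancher/kubernetes/... .
func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no module build info (built without modules?)")
		return
	}
	for _, dep := range info.Deps {
		if dep.Replace != nil {
			fmt.Printf("%s %s => %s %s\n",
				dep.Path, dep.Version, dep.Replace.Path, dep.Replace.Version)
		}
	}
}
```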

go.sum

@@ -198,10 +198,11 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY=
 github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
-github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
 github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
 github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
@@ -433,7 +434,6 @@ github.com/insomniacslk/dhcp v0.0.0-20190712084813-dc1a53400564/go.mod h1:CfMdgu
 github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
 github.com/jamescun/tuntap v0.0.0-20190712092105-cb1fb277045c/go.mod h1:zzwpsgcYhzzIP5WyF8g9ivCv38cY9uAV9Gu0m3lThhE=
 github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jetstack/cert-manager v0.7.2/go.mod h1:nbddmhjWxYGt04bxvwVGUSeLhZ2PCyNvd7MpXdq+yWY=
 github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
@@ -637,49 +637,49 @@ github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd h1:KPnQ
 github.com/rancher/juju-to-pkg-errors v0.0.0-20200701001603-16f3c28b59bd/go.mod h1:QYmg8cqWPPfIbpEuhtJbEdWwA6PEKSY016Z6EdfL9+8=
 github.com/rancher/kine v0.4.0 h1:1IhWy3TzjExG8xnj46eyUEWdzqNAD1WrgL4eEBKm6Uc=
 github.com/rancher/kine v0.4.0/go.mod h1:IImtCJ68AIkE+VY/kUI0NkyJL5q5WzO8QvMsSXqbrpA=
-github.com/rancher/kubernetes v1.18.8-k3s1 h1:dqukpA38pR5V7opoTDDslVYIxlM3sP5535u3Zd23W2I=
+github.com/rancher/kubernetes v1.18.9-k3s1 h1:LLJLc7p+Xmt3KeF7jkLz2MuS0+0KRE9fTDprUX4Y9RA=
-github.com/rancher/kubernetes v1.18.8-k3s1/go.mod h1:SU7bBi8ZNHRjqzNhY4U78gClS1O7Q7avCrfF5aSiDko=
+github.com/rancher/kubernetes v1.18.9-k3s1/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.8-k3s1 h1:GEbvokWECNmp/nZm40JeQ/DGa1riPeKFDqJZs/VPQMU=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.9-k3s1 h1:qTAC4DGioKxoy3b0JFTWjw3ShGcKqjNTUh2AKfvkMBU=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.8-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.9-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.8-k3s1 h1:lK7L5DnztJd9jcgfYwsXWRHq2EHPAgiD7hIX/9HUVJE=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.9-k3s1 h1:u3lZHqe48csUFUZycU4W8hyvgKt3PJZqeALxm9t5PKA=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.8-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.9-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.8-k3s1 h1:LNn6Vx4nNHDc+ckqBKsRbpbm+Eh0kWHvCvpC0np3JVM=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.9-k3s1 h1:/EItzAufFgB0GbygS2dotV2HY30U8HoWu3c7QSw9P9M=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.8-k3s1/go.mod h1:EWaS4Y9oElqOAwD3kKDYBACo+zW/N3epe1THeEbG72k=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.9-k3s1/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.8-k3s1 h1:FmUl8p1damR3F5GxMtXM4tteIr/a0Akx+48qlU7hOKA=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.9-k3s1 h1:ipSuqeFd8lmKFyZk5VabMOOgViNrItz61J9QZS6DNpY=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.8-k3s1/go.mod h1:iiHJKeJoHT/SawjIpPfHQ+5o47HW8mlzjYvADYbnHrk=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.9-k3s1/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.8-k3s1 h1:gJ9/3Vaen+SdjVQsCScCY5/zcZ/sLOqlOhlTdPUoD8s=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.9-k3s1 h1:BL+V4zSgs77zGy0f1XqnKXs3lJW0pBw9zR9pT6bQtMA=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.8-k3s1/go.mod h1:Las5K5JupAs7RlnSvh6AiOsz0P0t3zylPnqQ9RDxaGA=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.9-k3s1/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.8-k3s1 h1:KTmdV7Egc777OeJqs6F3CurMSJlUE2HSr6nomO1G900=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.9-k3s1 h1:dA86m3H1M/oFV9VICMMxNStfVeQaZIuFxg7GAVEkNqI=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.8-k3s1/go.mod h1:/LATWw92UfCIu8M1NjrVaOtVJ9buBJZS9Zvj0BtY5Ac=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.9-k3s1/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.8-k3s1 h1:s/7BrWAaJX9ngv+q3LmkkaKpozIM3gOcWPEXbxFDqxc=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.9-k3s1 h1:t728oClyjfhX0VI9o3J8X7POJiogVDZK5rLPVIGnd8g=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.8-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.9-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.8-k3s1 h1:ZEG20//RPRbrKX1EVpsZN8jASYKXcwVDXPk9+o0l27Q=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.9-k3s1 h1:SzGana3eKbrMGFaV4FKFZIoIz2t8sVaczZoCCXMN1OU=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.8-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.9-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.8-k3s1 h1:DzakaPPGg9RHo81xa65tR0k6Ds8xmHpaH+OLG35y+Nk=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.9-k3s1 h1:QOCk40d0s4/IQvUnYXu5hdGsPkhdnGiS6YxpMikUKJM=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.8-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.9-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.8-k3s1 h1:eiuJQhX0XaU/XVFIxMKbuKUXbt5c2vAl7dsaQeuB+Zg=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.9-k3s1 h1:MNmBot3Rj6QDVxigJvcxXWOPKCm5NM8ACEDk1nvXT/4=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.8-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.9-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.8-k3s1 h1:G+2GcKHBfsnnyj5Fuqj6ks6DG6hEQyZrIloRxYHV1lw=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.9-k3s1 h1:KRlY1Hljsh/qBbB/DASEBdYMPxFRNkMpOltpIURjMTI=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.8-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.9-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.8-k3s1 h1:x0Z1PPkojOpsrQbjIQoZQ9Tie7X5h/17YvluEtVks0Y=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.9-k3s1 h1:nkGWt+by8lBBmOeytS81Xt4vnn9OMA1DftLKtObbxdE=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.8-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.9-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.8-k3s1 h1:COXcOjKFrmfayOoZT4OmzTlo0JdEcbOkm5YKLa4FTg0=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.9-k3s1 h1:A7Elvwo8Cy14hhKAjDuvkaE1xFLqJehqdLQVkM6iBwM=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.8-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.9-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.8-k3s1 h1:NyD3nsuNkr6Gq/kyLJvUU941fwmtAwVSec14oSKm84g=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.9-k3s1 h1:+S7ag7Rqe5KCzY+i5rN45ckwBIAc/h9wmj2ol0NCdjU=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.8-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.9-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.8-k3s1 h1:0LkqtqPCq8UssLzSNQo1u+r9tqXQZaXMT05RJ90SemA=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.9-k3s1 h1:jwnEH/KEl67g1bb+kOAW+UlA5pSqf0h969fi88y4U2E=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.8-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.9-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.8-k3s1 h1:tb76wY82Q/BwvXZNt+Vdxkm+AEa6UQw47btLa2OeIGo=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.9-k3s1 h1:0Ai5nstkLanNLfrix1pFsVfvc8NLvxCEDwS5Qsf5Dic=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.8-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.9-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.8-k3s1 h1:ya9WTAduoFNKHsB/sMdXJPApMr58YSUyXRoJH0nhLOI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.9-k3s1 h1:qYbKao9YdKDNZyzbQeo+uIuBCGviy3PbkVSE6j0zAjk=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.8-k3s1/go.mod h1:YK9Z0Z/3MCo+LC6HsodGE8zKhQp8Z9btmCMh+Yi673g=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.9-k3s1/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.8-k3s1 h1:dhWex7SjpjQ5/iZEo+I3YjIOaQwUFudcE58Hkxgq0Z0=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.9-k3s1 h1:ebsW5Uu/XIzjnO9P1okUvj1IGmspfmNaUpynfHupUPE=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.8-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.9-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.8-k3s1 h1:Sfhr2dUomwUq0b3p5/PjKBxGpz2+rz7ucp/GriEdVcA=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.9-k3s1 h1:PmGk7TPAqdd/ZB3BhBbUPUxqgOiXRFlyjP54l3bnWu8=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.8-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.9-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.8-k3s1 h1:CPxpH4yeNeta/9cOdX8QGiR6v3RoJz5M9EKmYNX3rB0=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.9-k3s1 h1:yeuKOUN7YSyZ5uEPN5lZztLKuF5BLSQC37hAQGxa+KA=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.8-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.9-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
-github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.8-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
+github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.9-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
 github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
 github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
 github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=

.travis.yml

@@ -1,8 +1,8 @@
 language: go
 go:
-  - 1.8
-  - 1.7
+  - 1.14
+  - 1.13
 install:
   - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
@@ -11,6 +11,9 @@ install:
 script:
   - go get
   - go test -cover ./...
+  - cd ./v5
+  - go get
+  - go test -cover ./...
 notifications:
   email: false

LICENSE

@@ -6,7 +6,7 @@ modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice
+* Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
 * Neither the name of the Evan Phoenix nor the names of its contributors

README.md

@@ -1,5 +1,5 @@
 # JSON-Patch
-`jsonpatch` is a library which provides functionallity for both applying
+`jsonpatch` is a library which provides functionality for both applying
 [RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
 well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
@@ -11,10 +11,11 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie
 **Latest and greatest**:
 ```bash
-go get -u github.com/evanphx/json-patch
+go get -u github.com/evanphx/json-patch/v5
 ```
 **Stable Versions**:
+* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
 * Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
 (previous versions below `v3` are unavailable)
@@ -82,7 +83,7 @@ When ran, you get the following output:
 ```bash
 $ go run main.go
 patch document:   {"height":null,"name":"Jane"}
-updated tina doc: {"age":28,"name":"Jane"}
+updated alternative doc: {"age":28,"name":"Jane"}
 ```
 ## Create and apply a JSON Patch
@@ -164,7 +165,7 @@ func main() {
 	}
 	if !jsonpatch.Equal(original, different) {
-		fmt.Println(`"original" is _not_ structurally equal to "similar"`)
+		fmt.Println(`"original" is _not_ structurally equal to "different"`)
 	}
 }
 ```
@@ -173,7 +174,7 @@ When ran, you get the following output:
 ```bash
 $ go run main.go
 "original" is structurally equal to "similar"
-"original" is _not_ structurally equal to "similar"
+"original" is _not_ structurally equal to "different"
 ```
 ## Combine merge patches

merge.go

@@ -307,13 +307,16 @@ func matchesValue(av, bv interface{}) bool {
 		return true
 	case map[string]interface{}:
 		bt := bv.(map[string]interface{})
-		for key := range at {
-			if !matchesValue(at[key], bt[key]) {
-				return false
-			}
-		}
+		if len(bt) != len(at) {
+			return false
+		}
 		for key := range bt {
-			if !matchesValue(at[key], bt[key]) {
+			av, aOK := at[key]
+			bv, bOK := bt[key]
+			if aOK != bOK {
+				return false
+			}
+			if !matchesValue(av, bv) {
 				return false
 			}
 		}
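The map case of matchesValue previously treated a key that was missing on one side the same as a key explicitly set to JSON null, since both lookups return a nil interface. A minimal standalone sketch of the corrected comparison (hypothetical `equalJSONMaps`, not the library's exported API):

```go
package main

import "fmt"

// equalJSONMaps mirrors the fixed matchesValue logic for the map case:
// compare lengths first, then require that each key's presence agrees
// before comparing values, so a missing key is never conflated with an
// explicit null (both read back as a nil interface{}).
func equalJSONMaps(at, bt map[string]interface{}) bool {
	if len(bt) != len(at) {
		return false
	}
	for key := range bt {
		av, aOK := at[key]
		bv, bOK := bt[key]
		if aOK != bOK {
			return false
		}
		if av != bv { // the real code recurses via matchesValue here
			return false
		}
	}
	return true
}

func main() {
	a := map[string]interface{}{"x": nil} // "x" set to null
	b := map[string]interface{}{}         // "x" absent
	fmt.Println(equalJSONMaps(a, b))      // false: the lengths differ
}
```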

patch.go

@@ -202,6 +202,10 @@ func (n *lazyNode) equal(o *lazyNode) bool {
 			return false
 		}
+		if len(n.doc) != len(o.doc) {
+			return false
+		}
 		for k, v := range n.doc {
 			ov, ok := o.doc[k]
@@ -209,6 +213,10 @@ func (n *lazyNode) equal(o *lazyNode) bool {
 				return false
 			}
+			if (v == nil) != (ov == nil) {
+				return false
+			}
 			if v == nil && ov == nil {
 				continue
 			}
@@ -429,14 +437,14 @@ func (d *partialArray) add(key string, val *lazyNode) error {
 		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 	}
-	if SupportNegativeIndices {
+	if idx < 0 {
+		if !SupportNegativeIndices {
+			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+		}
 		if idx < -len(ary) {
 			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 		}
-		if idx < 0 {
-			idx += len(ary)
-		}
+		idx += len(ary)
 	}
 
 	copy(ary[0:idx], cur[0:idx])
@@ -473,14 +481,14 @@ func (d *partialArray) remove(key string) error {
 		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 	}
-	if SupportNegativeIndices {
+	if idx < 0 {
+		if !SupportNegativeIndices {
+			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+		}
 		if idx < -len(cur) {
 			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
 		}
-		if idx < 0 {
-			idx += len(cur)
-		}
+		idx += len(cur)
 	}
 
 	ary := make([]*lazyNode, len(cur)-1)
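Both `add` and `remove` hunks reshape the same index guard: a negative index is now rejected up front unless SupportNegativeIndices is enabled and the index lies within [-len, -1], in which case it counts from the end. A self-contained sketch of the resolution rule (hypothetical `resolveIndex`, assuming the upper-bound check the library performs earlier):

```go
package main

import "fmt"

// resolveIndex mirrors the reworked guard: non-negative indices pass through,
// negative ones are rejected unless negative indexing is enabled and the
// index stays within [-length, -1], in which case it counts from the end.
func resolveIndex(idx, length int, supportNegative bool) (int, error) {
	if idx > length {
		return 0, fmt.Errorf("unable to access invalid index: %d", idx)
	}
	if idx < 0 {
		if !supportNegative {
			return 0, fmt.Errorf("unable to access invalid index: %d", idx)
		}
		if idx < -length {
			return 0, fmt.Errorf("unable to access invalid index: %d", idx)
		}
		idx += length
	}
	return idx, nil
}

func main() {
	fmt.Println(resolveIndex(-1, 3, true))  // 2 <nil>
	fmt.Println(resolveIndex(-1, 3, false)) // rejected: negatives disabled
	fmt.Println(resolveIndex(-4, 3, true))  // rejected: below -length
}
```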

impersonation.go

@@ -117,10 +117,37 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime.
 			}
 		}
 
-		if !groupsSpecified && username != user.Anonymous {
-			// When impersonating a non-anonymous user, if no groups were specified
-			// include the system:authenticated group in the impersonated user info
-			groups = append(groups, user.AllAuthenticated)
+		if username != user.Anonymous {
+			// When impersonating a non-anonymous user, include the 'system:authenticated' group
+			// in the impersonated user info:
+			// - if no groups were specified
+			// - if a group has been specified other than 'system:authenticated'
+			//
+			// If 'system:unauthenticated' group has been specified we should not include
+			// the 'system:authenticated' group.
+			addAuthenticated := true
+			for _, group := range groups {
+				if group == user.AllAuthenticated || group == user.AllUnauthenticated {
+					addAuthenticated = false
+					break
+				}
+			}
+
+			if addAuthenticated {
+				groups = append(groups, user.AllAuthenticated)
+			}
+		} else {
+			addUnauthenticated := true
+			for _, group := range groups {
+				if group == user.AllUnauthenticated {
+					addUnauthenticated = false
+					break
+				}
+			}
+
+			if addUnauthenticated {
+				groups = append(groups, user.AllUnauthenticated)
+			}
 		}
 
 		newUser := &user.DefaultInfo{
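Taken together, the new branches guarantee the impersonated user lands in exactly one umbrella group unless the request pinned one explicitly: non-anonymous users get system:authenticated unless either umbrella group was requested, and anonymous users get system:unauthenticated unless it is already present. A standalone sketch of that decision (hypothetical `defaultGroups`; the real constants are user.AllAuthenticated and user.AllUnauthenticated):

```go
package main

import "fmt"

const (
	anonymous          = "system:anonymous"
	allAuthenticated   = "system:authenticated"
	allUnauthenticated = "system:unauthenticated"
)

// defaultGroups sketches the group-defaulting rule from the patch above.
func defaultGroups(username string, groups []string) []string {
	has := func(want ...string) bool {
		for _, g := range groups {
			for _, w := range want {
				if g == w {
					return true
				}
			}
		}
		return false
	}
	if username != anonymous {
		if !has(allAuthenticated, allUnauthenticated) {
			groups = append(groups, allAuthenticated)
		}
	} else if !has(allUnauthenticated) {
		groups = append(groups, allUnauthenticated)
	}
	return groups
}

func main() {
	fmt.Println(defaultGroups("jane", nil))                          // [system:authenticated]
	fmt.Println(defaultGroups("jane", []string{"devs"}))             // [devs system:authenticated]
	fmt.Println(defaultGroups("jane", []string{allUnauthenticated})) // unchanged
	fmt.Println(defaultGroups(anonymous, nil))                       // [system:unauthenticated]
}
```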

store.go

@@ -547,7 +547,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor
 	newItemFunc := getNewItemFunc(listObj, v)
 
-	var returnedRV, continueRV int64
+	var returnedRV, continueRV, withRev int64
 	var continueKey string
 	switch {
 	case s.pagingEnabled && len(pred.Continue) > 0:
@@ -568,7 +568,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor
 		// continueRV==0 is invalid.
 		// If continueRV < 0, the request is for the latest resource version.
 		if continueRV > 0 {
-			options = append(options, clientv3.WithRev(continueRV))
+			withRev = continueRV
 			returnedRV = continueRV
 		}
 	case s.pagingEnabled && pred.Limit > 0:
@@ -578,7 +578,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor
 			return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
 		}
 		if fromRV > 0 {
-			options = append(options, clientv3.WithRev(int64(fromRV)))
+			withRev = int64(fromRV)
 		}
 		returnedRV = int64(fromRV)
 	}
@@ -589,6 +589,9 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor
 	default:
 		options = append(options, clientv3.WithPrefix())
 	}
+	if withRev != 0 {
+		options = append(options, clientv3.WithRev(withRev))
+	}
 
 	// loop until we have filled the requested limit from etcd or there are no more results
 	var lastKey []byte
@@ -654,6 +657,10 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor
 			break
 		}
 		key = string(lastKey) + "\x00"
+		if withRev == 0 {
+			withRev = returnedRV
+			options = append(options, clientv3.WithRev(withRev))
+		}
 	}
 
 	// instruct the client to begin querying from immediately after the last key we returned
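The common thread in these hunks: the read revision is computed once into withRev and applied to the etcd options in a single place, and when the first page was served at "latest", follow-up pages are pinned to the revision that page returned, so a paginated list reads from one consistent snapshot. A schematic of that pattern, with a hypothetical `fetchPage` standing in for the etcd range call:

```go
package main

import "fmt"

// fetchPage is a stand-in for an etcd range request: given a start key and a
// revision (0 = latest), it returns one page of keys, the revision actually
// served, and the key to continue from ("" when done).
type fetchPage func(startKey string, rev int64) (items []string, servedRev int64, nextKey string)

// listAll pins every page after the first to the revision the first page was
// served at, mirroring the withRev bookkeeping in the patch above.
func listAll(fetch fetchPage, startKey string, withRev int64) []string {
	var all []string
	key := startKey
	for {
		items, servedRev, nextKey := fetch(key, withRev)
		all = append(all, items...)
		if nextKey == "" {
			return all
		}
		if withRev == 0 {
			// First page ran at "latest"; freeze later pages at its revision
			// so the combined result is a consistent snapshot.
			withRev = servedRev
		}
		key = nextKey
	}
}

func main() {
	pages := map[string][]string{"": {"a", "b"}, "b\x00": {"c"}}
	next := map[string]string{"": "b\x00", "b\x00": ""}
	fetch := func(k string, rev int64) ([]string, int64, string) {
		fmt.Printf("fetch key=%q rev=%d\n", k, rev)
		return pages[k], 42, next[k]
	}
	fmt.Println(listAll(fetch, "", 0)) // second fetch is pinned to rev=42
}
```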

base.go

@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion   = "v1.18.8-k3s1"
-	gitCommit    = "b86d0e4a07fd882c2f9718f4e82b06dfd4b55195"
+	gitVersion   = "v1.18.9-k3s1"
+	gitCommit    = "f1d9dca4e9681e74faee7359af3cc2df01a4b6d6"
 	gitTreeState = "clean"
-	buildDate = "2020-08-13T18:53:34Z"
+	buildDate = "2020-09-16T22:20:24Z"
 )

controller.go

@@ -138,11 +138,11 @@ func (c *controller) Run(stopCh <-chan struct{}) {
 	c.reflectorMutex.Unlock()
 
 	var wg wait.Group
-	defer wg.Wait()
 
 	wg.StartWithChannel(stopCh, r.Run)
 
 	wait.Until(c.processLoop, time.Second, stopCh)
+	wg.Wait()
 }
 
 // Returns true once this controller has completed an initial resource listing

reflector.go

@@ -551,5 +551,26 @@ func isExpiredError(err error) bool {
 }
 
 func isTooLargeResourceVersionError(err error) bool {
-	return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
+	if apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) {
+		return true
+	}
+	// In Kubernetes 1.17.0-1.18.5, the api server doesn't set the error status cause to
+	// metav1.CauseTypeResourceVersionTooLarge to indicate that the requested minimum resource
+	// version is larger than the largest currently available resource version. To ensure backward
+	// compatibility with these server versions we also need to detect the error based on the content
+	// of the error message field.
+	if !apierrors.IsTimeout(err) {
+		return false
+	}
+	apierr, ok := err.(apierrors.APIStatus)
+	if !ok || apierr == nil || apierr.Status().Details == nil {
+		return false
+	}
+	for _, cause := range apierr.Status().Details.Causes {
+		// Matches the message returned by api server 1.17.0-1.18.5 for this error condition
+		if cause.Message == "Too large resource version" {
+			return true
+		}
+	}
+	return false
 }
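The detection is now two-step: trust the structured cause when present, otherwise fall back to matching the exact message emitted by 1.17.0-1.18.5 servers. The same "structured first, message fallback" pattern, sketched with local stand-in types rather than the client-go ones:

```go
package main

import (
	"errors"
	"fmt"
)

// apiError is a stand-in for an API status error carrying a typed cause.
type apiError struct {
	causeType string
	message   string
}

func (e *apiError) Error() string { return e.message }

// isTooLargeRV checks the structured cause first, then falls back to the
// legacy message emitted by older servers, mirroring the patch above.
func isTooLargeRV(err error) bool {
	var apiErr *apiError
	if !errors.As(err, &apiErr) {
		return false
	}
	if apiErr.causeType == "ResourceVersionTooLarge" {
		return true
	}
	// Older servers: detect by the exact message they used.
	return apiErr.message == "Too large resource version"
}

func main() {
	fmt.Println(isTooLargeRV(&apiError{causeType: "ResourceVersionTooLarge"})) // true
	fmt.Println(isTooLargeRV(&apiError{message: "Too large resource version"})) // true
	fmt.Println(isTooLargeRV(errors.New("other")))                              // false
}
```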

go.sum

@@ -20,8 +20,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b h1:vCplRbYcTTeBVLjIU0KvipEeVBSxl6sakUBRmeLBTkw=
-github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
+github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -65,7 +65,6 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
 github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=

base.go

@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion   = "v1.18.8-k3s1"
-	gitCommit    = "b86d0e4a07fd882c2f9718f4e82b06dfd4b55195"
+	gitVersion   = "v1.18.9-k3s1"
+	gitCommit    = "f1d9dca4e9681e74faee7359af3cc2df01a4b6d6"
 	gitTreeState = "clean"
-	buildDate = "2020-08-13T18:53:34Z"
+	buildDate = "2020-09-16T22:20:24Z"
 )

go.sum

@@ -20,7 +20,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
@@ -58,7 +58,6 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
 github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=

describe.go

@@ -3640,12 +3640,19 @@ func DescribeEvents(el *corev1.EventList, w PrefixWriter) {
 			interval = fmt.Sprintf("%s (x%d over %s)", translateTimestampSince(e.LastTimestamp), e.Count, translateTimestampSince(e.FirstTimestamp))
 		} else {
 			interval = translateTimestampSince(e.FirstTimestamp)
+			if e.FirstTimestamp.IsZero() {
+				interval = translateMicroTimestampSince(e.EventTime)
+			}
 		}
+		source := e.Source.Component
+		if source == "" {
+			source = e.ReportingController
+		}
 		w.Write(LEVEL_1, "%v\t%v\t%s\t%v\t%v\n",
 			e.Type,
 			e.Reason,
 			interval,
-			formatEventSource(e.Source),
+			source,
 			strings.TrimSpace(e.Message),
 		)
 	}
@@ -4739,6 +4746,16 @@ func shorten(s string, maxLength int) string {
 	return s
 }
 
+// translateMicroTimestampSince returns the elapsed time since timestamp in
+// human-readable approximation.
+func translateMicroTimestampSince(timestamp metav1.MicroTime) string {
+	if timestamp.IsZero() {
+		return "<unknown>"
+	}
+
+	return duration.HumanDuration(time.Since(timestamp.Time))
+}
+
 // translateTimestampSince returns the elapsed time since timestamp in
 // human-readable approximation.
 func translateTimestampSince(timestamp metav1.Time) string {
@@ -4749,15 +4766,6 @@ func translateTimestampSince(timestamp metav1.Time) string {
 	return duration.HumanDuration(time.Since(timestamp.Time))
 }
 
-// formatEventSource formats EventSource as a comma separated string excluding Host when empty
-func formatEventSource(es corev1.EventSource) string {
-	EventSourceString := []string{es.Component}
-	if len(es.Host) > 0 {
-		EventSourceString = append(EventSourceString, es.Host)
-	}
-	return strings.Join(EventSourceString, ", ")
-}
-
 // Pass ports=nil for all ports.
 func formatEndpoints(endpoints *corev1.Endpoints, ports sets.String) string {
 	if len(endpoints.Subsets) == 0 {
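Events created through the newer events API carry only EventTime (FirstTimestamp is zero) and only ReportingController (Source.Component is empty), so the describer now falls back to those fields instead of printing empty columns. The fallback logic, sketched over plain types:

```go
package main

import (
	"fmt"
	"time"
)

type event struct {
	FirstTimestamp      time.Time // legacy field; zero for events-API objects
	EventTime           time.Time // micro-time used by the newer events API
	SourceComponent     string    // legacy source; empty for events-API objects
	ReportingController string
}

// describe mirrors the fallbacks in the patch: prefer the legacy fields,
// fall back to the events-API fields when the legacy ones are unset.
func describe(e event) (age, source string) {
	ts := e.FirstTimestamp
	if ts.IsZero() {
		ts = e.EventTime
	}
	if ts.IsZero() {
		age = "<unknown>"
	} else {
		age = time.Since(ts).Round(time.Second).String()
	}
	source = e.SourceComponent
	if source == "" {
		source = e.ReportingController
	}
	return age, source
}

func main() {
	e := event{EventTime: time.Now().Add(-90 * time.Second), ReportingController: "kubelet"}
	fmt.Println(describe(e)) // 1m30s kubelet
}
```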

BUILD

@@ -20,6 +20,7 @@ go_test(
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",

controller_utils.go

@@ -602,7 +602,11 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
 		return fmt.Errorf("object does not have ObjectMeta, %v", err)
 	}
 	klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
-	if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+	if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, metav1.DeleteOptions{}); err != nil {
+		if apierrors.IsNotFound(err) {
+			klog.V(4).Infof("pod %v/%v has already been deleted.", namespace, podID)
+			return err
+		}
 		r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
 		return fmt.Errorf("unable to delete pods: %v", err)
 	}

daemon_controller.go

@@ -30,6 +30,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -1005,10 +1006,12 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
 			go func(ix int) {
 				defer deleteWait.Done()
 				if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[ix], ds); err != nil {
-					klog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
 					dsc.expectations.DeletionObserved(dsKey)
-					errCh <- err
-					utilruntime.HandleError(err)
+					if !apierrors.IsNotFound(err) {
+						klog.V(2).Infof("Failed deletion, decremented expectations for set %q/%q", ds.Namespace, ds.Name)
+						errCh <- err
+						utilruntime.HandleError(err)
+					}
 				}
 			}(i)
 		}
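This pairs with the controller_utils.go change above: DeletePod now surfaces NotFound instead of swallowing it, and syncNodes still calls DeletionObserved on that path but no longer reports it as a failure. Decrementing the expectation even for an already-deleted pod matters, because an unsatisfied expectation would stall the DaemonSet's next sync until the expectations timeout. A toy expectations counter makes the invariant visible:

```go
package main

import "fmt"

// expectations is a toy version of the controller's deletion expectations:
// sync work is deferred until every expected deletion has been observed.
type expectations struct{ pendingDeletes int }

func (e *expectations) expectDeletes(n int) { e.pendingDeletes += n }
func (e *expectations) deletionObserved()   { e.pendingDeletes-- }
func (e *expectations) satisfied() bool     { return e.pendingDeletes <= 0 }

func main() {
	var exp expectations
	exp.expectDeletes(2)

	// One delete succeeds, one finds the pod already gone (NotFound).
	for _, alreadyGone := range []bool{false, true} {
		// The patched code observes the deletion in BOTH cases; only real
		// failures are reported as errors.
		exp.deletionObserved()
		if alreadyGone {
			fmt.Println("pod already deleted; not treated as a failure")
		}
	}
	fmt.Println("expectations satisfied:", exp.satisfied()) // true
}
```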

BUILD

@@ -48,7 +48,6 @@ go_test(
         "//pkg/api/v1/endpoints:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/controller:go_default_library",
-        "//pkg/controller/util/endpoint:go_default_library",
         "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -65,6 +64,7 @@ go_test(
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/util/testing:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
+        "//vendor/k8s.io/utils/net:go_default_library",
         "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )

endpoints_controller.go

@@ -19,7 +19,6 @@ package endpoint
 import (
 	"context"
 	"fmt"
-	"reflect"
 	"strconv"
 	"time"
 
@@ -213,39 +212,27 @@ func (e *EndpointController) addPod(obj interface{}) {
 }
 
 func podToEndpointAddressForService(svc *v1.Service, pod *v1.Pod) (*v1.EndpointAddress, error) {
+	var endpointIP string
 	if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) {
-		return podToEndpointAddress(pod), nil
-	}
-
-	// api-server service controller ensured that the service got the correct IP Family
-	// according to user setup, here we only need to match EndPoint IPs' family to service
-	// actual IP family. as in, we don't need to check service.IPFamily
-
-	ipv6ClusterIP := utilnet.IsIPv6String(svc.Spec.ClusterIP)
-	for _, podIP := range pod.Status.PodIPs {
-		ipv6PodIP := utilnet.IsIPv6String(podIP.IP)
-		// same family?
-		// TODO (khenidak) when we remove the max of 2 PodIP limit from pods
-		// we will have to return multiple endpoint addresses
-		if ipv6ClusterIP == ipv6PodIP {
-			return &v1.EndpointAddress{
-				IP:       podIP.IP,
-				NodeName: &pod.Spec.NodeName,
-				TargetRef: &v1.ObjectReference{
-					Kind:            "Pod",
-					Namespace:       pod.ObjectMeta.Namespace,
-					Name:            pod.ObjectMeta.Name,
-					UID:             pod.ObjectMeta.UID,
-					ResourceVersion: pod.ObjectMeta.ResourceVersion,
-				}}, nil
+		// In a legacy cluster, the pod IP is guaranteed to be usable
+		endpointIP = pod.Status.PodIP
+	} else {
+		ipv6Service := endpointutil.IsIPv6Service(svc)
+		for _, podIP := range pod.Status.PodIPs {
+			ipv6PodIP := utilnet.IsIPv6String(podIP.IP)
+			if ipv6Service == ipv6PodIP {
+				endpointIP = podIP.IP
+				break
+			}
+		}
+		if endpointIP == "" {
+			return nil, fmt.Errorf("failed to find a matching endpoint for service %v", svc.Name)
 		}
 	}
-	return nil, fmt.Errorf("failed to find a matching endpoint for service %v", svc.Name)
-}
 
-func podToEndpointAddress(pod *v1.Pod) *v1.EndpointAddress {
 	return &v1.EndpointAddress{
-		IP:       pod.Status.PodIP,
+		IP:       endpointIP,
 		NodeName: &pod.Spec.NodeName,
 		TargetRef: &v1.ObjectReference{
 			Kind:            "Pod",
@@ -253,24 +240,15 @@ func podToEndpointAddress(pod *v1.Pod) *v1.EndpointAddress {
 			Name:            pod.ObjectMeta.Name,
 			UID:             pod.ObjectMeta.UID,
 			ResourceVersion: pod.ObjectMeta.ResourceVersion,
-		}}
-}
-
-func endpointChanged(pod1, pod2 *v1.Pod) bool {
-	endpointAddress1 := podToEndpointAddress(pod1)
-	endpointAddress2 := podToEndpointAddress(pod2)
-
-	endpointAddress1.TargetRef.ResourceVersion = ""
-	endpointAddress2.TargetRef.ResourceVersion = ""
-
-	return !reflect.DeepEqual(endpointAddress1, endpointAddress2)
+		},
+	}, nil
 }
 
 // When a pod is updated, figure out what services it used to be a member of
 // and what services it will be a member of, and enqueue the union of these.
 // old and cur must be *v1.Pod types.
 func (e *EndpointController) updatePod(old, cur interface{}) {
-	services := endpointutil.GetServicesToUpdateOnPodChange(e.serviceLister, e.serviceSelectorCache, old, cur, endpointChanged)
+	services := endpointutil.GetServicesToUpdateOnPodChange(e.serviceLister, e.serviceSelectorCache, old, cur)
 	for key := range services {
 		e.queue.AddAfter(key, e.endpointUpdatesBatchPeriod)
 	}
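The rewrite funnels the legacy and dual-stack paths through a single EndpointAddress constructor: legacy clusters use pod.Status.PodIP directly, while dual-stack clusters take the first pod IP whose family matches the Service. A sketch of the family-matching selection using only the standard library (IsIPv6Service additionally consults spec.IPFamily, which this sketch omits):

```go
package main

import (
	"fmt"
	"net"
)

func isIPv6(s string) bool {
	ip := net.ParseIP(s)
	return ip != nil && ip.To4() == nil
}

// pickEndpointIP returns the first pod IP in the same family as the service,
// mirroring the loop in podToEndpointAddressForService.
func pickEndpointIP(serviceIP string, podIPs []string) (string, error) {
	want := isIPv6(serviceIP)
	for _, ip := range podIPs {
		if isIPv6(ip) == want {
			return ip, nil
		}
	}
	return "", fmt.Errorf("no pod IP matches the service's IP family")
}

func main() {
	fmt.Println(pickEndpointIP("10.0.0.1", []string{"fd00::10", "172.17.0.4"})) // 172.17.0.4
	fmt.Println(pickEndpointIP("fd00::1", []string{"fd00::10", "172.17.0.4"}))  // fd00::10
}
```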

BUILD

@@ -26,7 +26,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",

endpointslice_controller.go

@@ -213,7 +213,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
 	klog.Infof("Starting endpoint slice controller")
 	defer klog.Infof("Shutting down endpoint slice controller")
 
-	if !cache.WaitForNamedCacheSync("endpoint_slice", stopCh, c.podsSynced, c.servicesSynced) {
+	if !cache.WaitForNamedCacheSync("endpoint_slice", stopCh, c.podsSynced, c.servicesSynced, c.endpointSlicesSynced, c.nodesSynced) {
 		return
 	}
 
@@ -425,7 +425,7 @@ func (c *Controller) addPod(obj interface{}) {
 }
 
 func (c *Controller) updatePod(old, cur interface{}) {
-	services := endpointutil.GetServicesToUpdateOnPodChange(c.serviceLister, c.serviceSelectorCache, old, cur, podEndpointChanged)
+	services := endpointutil.GetServicesToUpdateOnPodChange(c.serviceLister, c.serviceSelectorCache, old, cur)
 	for key := range services {
 		c.queue.AddAfter(key, c.endpointUpdatesBatchPeriod)
 	}

reconciler.go

@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
 	corelisters "k8s.io/client-go/listers/core/v1"
@@ -59,7 +58,7 @@ type endpointMeta struct {
 func (r *reconciler) reconcile(service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) error {
 	addressType := discovery.AddressTypeIPv4
-	if isIPv6Service(service) {
+	if endpointutil.IsIPv6Service(service) {
 		addressType = discovery.AddressTypeIPv6
 	}
@@ -180,8 +179,6 @@ func (r *reconciler) finalize(
 	slicesToDelete []*discovery.EndpointSlice,
 	triggerTime time.Time,
 ) error {
-	errs := []error{}
-
 	// If there are slices to create and delete, change the creates to updates
 	// of the slices that would otherwise be deleted.
 	for i := 0; i < len(slicesToDelete); {
@@ -206,16 +203,20 @@ func (r *reconciler) finalize(
 		}
 	}

-	for _, endpointSlice := range slicesToCreate {
-		addTriggerTimeAnnotation(endpointSlice, triggerTime)
-		createdSlice, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
-		if err != nil {
-			// If the namespace is terminating, creates will continue to fail. Simply drop the item.
-			if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
-				return nil
-			}
-			errs = append(errs, fmt.Errorf("Error creating EndpointSlice for Service %s/%s: %v", service.Namespace, service.Name, err))
-		} else {
+	// Don't create new EndpointSlices if the Service is pending deletion. This
+	// is to avoid a potential race condition with the garbage collector where
+	// it tries to delete EndpointSlices as this controller replaces them.
+	if service.DeletionTimestamp == nil {
+		for _, endpointSlice := range slicesToCreate {
+			addTriggerTimeAnnotation(endpointSlice, triggerTime)
+			createdSlice, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
+			if err != nil {
+				// If the namespace is terminating, creates will continue to fail. Simply drop the item.
+				if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
+					return nil
+				}
+				return fmt.Errorf("failed to create EndpointSlice for Service %s/%s: %v", service.Namespace, service.Name, err)
+			}
 			r.endpointSliceTracker.Update(createdSlice)
 			metrics.EndpointSliceChanges.WithLabelValues("create").Inc()
 		}
@@ -225,24 +226,22 @@ func (r *reconciler) finalize(
 		addTriggerTimeAnnotation(endpointSlice, triggerTime)
 		updatedSlice, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{})
 		if err != nil {
-			errs = append(errs, fmt.Errorf("Error updating %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err))
-		} else {
-			r.endpointSliceTracker.Update(updatedSlice)
-			metrics.EndpointSliceChanges.WithLabelValues("update").Inc()
+			return fmt.Errorf("failed to update %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
 		}
+		r.endpointSliceTracker.Update(updatedSlice)
+		metrics.EndpointSliceChanges.WithLabelValues("update").Inc()
 	}

 	for _, endpointSlice := range slicesToDelete {
 		err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Delete(context.TODO(), endpointSlice.Name, metav1.DeleteOptions{})
 		if err != nil {
-			errs = append(errs, fmt.Errorf("Error deleting %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err))
-		} else {
-			r.endpointSliceTracker.Delete(endpointSlice)
-			metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
+			return fmt.Errorf("failed to delete %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)
 		}
+		r.endpointSliceTracker.Delete(endpointSlice)
+		metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
 	}

-	return utilerrors.NewAggregate(errs)
+	return nil
 }

 // reconcileByPortMapping compares the endpoints found in existing slices with
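The finalize change above does two things: it stops collecting errors into an aggregate and instead returns on the first failure, and it skips slice creation entirely once the Service carries a deletion timestamp, so the controller cannot race the garbage collector. A minimal standalone sketch of that control flow, with hypothetical slice and applyFn types standing in for the real EndpointSlice client calls:

package main

import (
	"errors"
	"fmt"
)

type slice struct{ name string }

// finalize applies creates only when deleting is false, and returns on the
// first failure instead of collecting an aggregate error.
func finalize(deleting bool, creates []slice, applyFn func(slice) error) error {
	if deleting {
		// Skip creates: the garbage collector may be deleting these objects
		// concurrently, so replacing them would race with it.
		return nil
	}
	for _, s := range creates {
		if err := applyFn(s); err != nil {
			return fmt.Errorf("failed to create %s: %w", s.name, err)
		}
	}
	return nil
}

func main() {
	err := finalize(false, []slice{{"a"}, {"b"}}, func(s slice) error {
		if s.name == "b" {
			return errors.New("boom")
		}
		return nil
	})
	fmt.Println(err) // failed to create b: boom
}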


@@ -18,7 +18,6 @@ package endpointslice
 import (
 	"fmt"
-	"reflect"
 	"time"

 	corev1 "k8s.io/api/core/v1"
@@ -36,19 +35,7 @@ import (
 	utilnet "k8s.io/utils/net"
 )

-// podEndpointChanged returns true if the results of podToEndpoint are different
-// for the pods passed to this function.
-func podEndpointChanged(pod1, pod2 *corev1.Pod) bool {
-	endpoint1 := podToEndpoint(pod1, &corev1.Node{}, &corev1.Service{Spec: corev1.ServiceSpec{}})
-	endpoint2 := podToEndpoint(pod2, &corev1.Node{}, &corev1.Service{Spec: corev1.ServiceSpec{}})
-	endpoint1.TargetRef.ResourceVersion = ""
-	endpoint2.TargetRef.ResourceVersion = ""
-	return !reflect.DeepEqual(endpoint1, endpoint2)
-}
-
-// podToEndpoint returns an Endpoint object generated from a Pod and Node.
+// podToEndpoint returns an Endpoint object generated from pod, node, and service.
 func podToEndpoint(pod *corev1.Pod, node *corev1.Node, service *corev1.Service) discovery.Endpoint {
 	// Build out topology information. This is currently limited to hostname,
 	// zone, and region, but this will be expanded in the future.
@@ -133,7 +120,7 @@ func getEndpointAddresses(podStatus corev1.PodStatus, service *corev1.Service) [
 	for _, podIP := range podStatus.PodIPs {
 		isIPv6PodIP := utilnet.IsIPv6String(podIP.IP)
-		if isIPv6PodIP == isIPv6Service(service) {
+		if isIPv6PodIP == endpointutil.IsIPv6Service(service) {
 			addresses = append(addresses, podIP.IP)
 		}
 	}
@@ -141,12 +128,6 @@ func getEndpointAddresses(podStatus corev1.PodStatus, service *corev1.Service) [
 	return addresses
 }

-// isIPv6Service returns true if the Service uses IPv6 addresses.
-func isIPv6Service(service *corev1.Service) bool {
-	// IPFamily is not guaranteed to be set, even in an IPv6 only cluster.
-	return (service.Spec.IPFamily != nil && *service.Spec.IPFamily == corev1.IPv6Protocol) || utilnet.IsIPv6String(service.Spec.ClusterIP)
-}
-
 // endpointsEqualBeyondHash returns true if endpoints have equal attributes
 // but excludes equality checks that would have already been covered with
 // endpoint hashing (see hashEndpoint func for more info).
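getEndpointAddresses keeps only pod IPs whose address family matches the Service's. A standalone sketch of the same filtering rule using only the standard library, where isIPv6 plays the role of utilnet.IsIPv6String and the inputs are hypothetical:

package main

import (
	"fmt"
	"net"
)

// isIPv6 reports whether s parses as an IP with no IPv4 form.
func isIPv6(s string) bool {
	ip := net.ParseIP(s)
	return ip != nil && ip.To4() == nil
}

// filterByFamily keeps only the pod IPs matching the Service's family.
func filterByFamily(podIPs []string, serviceIsIPv6 bool) []string {
	var out []string
	for _, ip := range podIPs {
		if isIPv6(ip) == serviceIsIPv6 {
			out = append(out, ip)
		}
	}
	return out
}

func main() {
	pods := []string{"10.0.0.5", "fd00::5"}
	fmt.Println(filterByFamily(pods, false)) // [10.0.0.5]
	fmt.Println(filterByFamily(pods, true))  // [fd00::5]
}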


@@ -28,6 +28,7 @@ import (
 	batch "k8s.io/api/batch/v1"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -614,7 +615,7 @@ func (jm *JobController) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh cha
 	for i := int32(0); i < int32(nbPods); i++ {
 		go func(ix int32) {
 			defer wait.Done()
-			if err := jm.podControl.DeletePod(job.Namespace, pods[ix].Name, job); err != nil {
+			if err := jm.podControl.DeletePod(job.Namespace, pods[ix].Name, job); err != nil && !apierrors.IsNotFound(err) {
 				defer utilruntime.HandleError(err)
 				klog.V(2).Infof("Failed to delete %v, job %q/%q deadline exceeded", pods[ix].Name, job.Namespace, job.Name)
 				errCh <- err
@@ -711,14 +712,17 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 			go func(ix int32) {
 				defer wait.Done()
 				if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, job); err != nil {
-					defer utilruntime.HandleError(err)
 					// Decrement the expected number of deletes because the informer won't observe this deletion
-					klog.V(2).Infof("Failed to delete %v, decrementing expectations for job %q/%q", activePods[ix].Name, job.Namespace, job.Name)
 					jm.expectations.DeletionObserved(jobKey)
-					activeLock.Lock()
-					active++
-					activeLock.Unlock()
-					errCh <- err
+					if !apierrors.IsNotFound(err) {
+						klog.V(2).Infof("Failed to delete %v, decremented expectations for job %q/%q", activePods[ix].Name, job.Namespace, job.Name)
+						activeLock.Lock()
+						active++
+						activeLock.Unlock()
+						errCh <- err
+						utilruntime.HandleError(err)
+					}
 				}
 			}(i)
 		}
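Both hunks apply the same rule: a NotFound error from a pod delete means the pod is already gone, so expectations are still lowered but the error is neither reported nor retried. A small sketch of the pattern, with deletePod as a hypothetical stand-in for podControl.DeletePod:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// deleteIgnoringNotFound treats "already deleted" as success; only other
// failures are surfaced for retry or recording.
func deleteIgnoringNotFound(deletePod func() error) error {
	if err := deletePod(); err != nil && !apierrors.IsNotFound(err) {
		return err // a real failure; callers should retry or record it
	}
	return nil
}

func main() {
	notFound := apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "mypod")
	fmt.Println(deleteIgnoringNotFound(func() error { return notFound })) // <nil>
}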


@@ -46,7 +46,9 @@ go_test(
         "//staging/src/k8s.io/client-go/discovery:go_default_library",
         "//staging/src/k8s.io/client-go/dynamic:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//staging/src/k8s.io/client-go/metadata:go_default_library",
+        "//staging/src/k8s.io/client-go/metadata/fake:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",
     ],


@@ -265,11 +265,9 @@ func (d *namespacedResourcesDeleter) updateNamespaceStatusFunc(namespace *v1.Nam
 	if namespace.DeletionTimestamp.IsZero() || namespace.Status.Phase == v1.NamespaceTerminating {
 		return namespace, nil
 	}
-	newNamespace := v1.Namespace{}
-	newNamespace.ObjectMeta = namespace.ObjectMeta
-	newNamespace.Status = *namespace.Status.DeepCopy()
+	newNamespace := namespace.DeepCopy()
 	newNamespace.Status.Phase = v1.NamespaceTerminating
-	return d.nsClient.UpdateStatus(context.TODO(), &newNamespace, metav1.UpdateOptions{})
+	return d.nsClient.UpdateStatus(context.TODO(), newNamespace, metav1.UpdateOptions{})
 }

 // finalized returns true if the namespace.Spec.Finalizers is an empty list
@@ -330,10 +328,8 @@ func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionRes
 	// we have a resource returned in the discovery API that supports no top-level verbs:
 	//  /apis/extensions/v1beta1/namespaces/default/replicationcontrollers
 	// when working with this resource type, we will get a literal not found error rather than expected method not supported
-	// remember next time that this resource does not support delete collection...
 	if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) {
 		klog.V(5).Infof("namespace controller - deleteCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
-		d.opCache.setNotSupported(key)
 		return false, nil
 	}
@@ -365,10 +361,8 @@ func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResou
 	// we have a resource returned in the discovery API that supports no top-level verbs:
 	//  /apis/extensions/v1beta1/namespaces/default/replicationcontrollers
 	// when working with this resource type, we will get a literal not found error rather than expected method not supported
-	// remember next time that this resource does not support delete collection...
 	if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) {
 		klog.V(5).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
-		d.opCache.setNotSupported(key)
 		return nil, false, nil
 	}
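Replacing the hand-assembled Namespace with DeepCopy means every field of the original object survives the status update, not just ObjectMeta and Status. A sketch of the pattern with the client call elided:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// markTerminating returns a copy of ns with its phase flipped, leaving the
// original untouched (informer cache objects must never be mutated in place).
func markTerminating(ns *v1.Namespace) *v1.Namespace {
	// DeepCopy preserves Spec, labels, annotations, and everything else;
	// building a fresh Namespace{} from parts risks dropping fields.
	updated := ns.DeepCopy()
	updated.Status.Phase = v1.NamespaceTerminating
	return updated
}

func main() {
	ns := &v1.Namespace{Spec: v1.NamespaceSpec{Finalizers: []v1.FinalizerName{v1.FinalizerKubernetes}}}
	fmt.Println(markTerminating(ns).Status.Phase, ns.Status.Phase) // "Terminating" ""
}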


@@ -42,6 +42,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",


@@ -358,7 +358,8 @@ func (tc *NoExecuteTaintManager) processPodOnNode(
 	minTolerationTime := getMinTolerationTime(usedTolerations)
 	// getMinTolerationTime returns negative value to denote infinite toleration.
 	if minTolerationTime < 0 {
-		klog.V(4).Infof("New tolerations for %v tolerate forever. Scheduled deletion won't be cancelled if already scheduled.", podNamespacedName.String())
+		klog.V(4).Infof("Current tolerations for %v tolerate forever, cancelling any scheduled deletion.", podNamespacedName.String())
+		tc.cancelWorkWithEvent(podNamespacedName)
 		return
 	}


@@ -39,6 +39,7 @@ import (
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -620,9 +621,11 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
 				if err := rsc.podControl.DeletePod(rs.Namespace, targetPod.Name, rs); err != nil {
 					// Decrement the expected number of deletes because the informer won't observe this deletion
 					podKey := controller.PodKey(targetPod)
-					klog.V(2).Infof("Failed to delete %v, decrementing expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name)
 					rsc.expectations.DeletionObserved(rsKey, podKey)
-					errCh <- err
+					if !apierrors.IsNotFound(err) {
+						klog.V(2).Infof("Failed to delete %v, decremented expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name)
+						errCh <- err
+					}
 				}
 			}(pod)
 		}


@@ -10,6 +10,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/api/v1/pod:go_default_library",
+        "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/hash:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -19,6 +20,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//vendor/k8s.io/utils/net:go_default_library",
     ],
 )


@@ -32,8 +32,10 @@ import (
 	v1listers "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/hash"
+	utilnet "k8s.io/utils/net"
 )

 // ServiceSelectorCache is a cache of service selectors to avoid high CPU consumption caused by frequent calls to AsSelectorPreValidated (see #73527)
@@ -106,9 +108,6 @@ func (sc *ServiceSelectorCache) GetPodServiceMemberships(serviceLister v1listers
 	return set, nil
 }

-// EndpointsMatch is a type of function that returns true if pod endpoints match.
-type EndpointsMatch func(*v1.Pod, *v1.Pod) bool
-
 // PortMapKey is used to uniquely identify groups of endpoint ports.
 type PortMapKey string
@@ -153,9 +152,10 @@ func ShouldSetHostname(pod *v1.Pod, svc *v1.Service) bool {
 	return len(pod.Spec.Hostname) > 0 && pod.Spec.Subdomain == svc.Name && svc.Namespace == pod.Namespace
 }

-// PodChanged returns two boolean values, the first returns true if the pod.
-// has changed, the second value returns true if the pod labels have changed.
-func PodChanged(oldPod, newPod *v1.Pod, endpointChanged EndpointsMatch) (bool, bool) {
+// podEndpointsChanged returns two boolean values. The first is true if the pod has
+// changed in a way that may change existing endpoints. The second value is true if the
+// pod has changed in a way that may affect which Services it matches.
+func podEndpointsChanged(oldPod, newPod *v1.Pod) (bool, bool) {
 	// Check if the pod labels have changed, indicating a possible
 	// change in the service membership
 	labelsChanged := false
@@ -175,16 +175,27 @@ func PodChanged(oldPod, newPod *v1.Pod, endpointChanged EndpointsMatch) (bool, b
 	if podutil.IsPodReady(oldPod) != podutil.IsPodReady(newPod) {
 		return true, labelsChanged
 	}
-	// Convert the pod to an Endpoint, clear inert fields,
-	// and see if they are the same.
-	// TODO: Add a watcher for node changes separate from this
-	// We don't want to trigger multiple syncs at a pod level when a node changes
-	return endpointChanged(newPod, oldPod), labelsChanged
+
+	// Check if the pod IPs have changed
+	if len(oldPod.Status.PodIPs) != len(newPod.Status.PodIPs) {
+		return true, labelsChanged
+	}
+	for i := range oldPod.Status.PodIPs {
+		if oldPod.Status.PodIPs[i].IP != newPod.Status.PodIPs[i].IP {
+			return true, labelsChanged
+		}
+	}
+
+	// Endpoints may also reference a pod's Name, Namespace, UID, and NodeName, but
+	// the first three are immutable, and NodeName is immutable once initially set,
+	// which happens before the pod gets an IP.
+	return false, labelsChanged
 }

 // GetServicesToUpdateOnPodChange returns a set of Service keys for Services
 // that have potentially been affected by a change to this pod.
-func GetServicesToUpdateOnPodChange(serviceLister v1listers.ServiceLister, selectorCache *ServiceSelectorCache, old, cur interface{}, endpointChanged EndpointsMatch) sets.String {
+func GetServicesToUpdateOnPodChange(serviceLister v1listers.ServiceLister, selectorCache *ServiceSelectorCache, old, cur interface{}) sets.String {
 	newPod := cur.(*v1.Pod)
 	oldPod := old.(*v1.Pod)
 	if newPod.ResourceVersion == oldPod.ResourceVersion {
@@ -193,7 +204,7 @@ func GetServicesToUpdateOnPodChange(serviceLister v1listers.ServiceLister, selec
 		return sets.String{}
 	}

-	podChanged, labelsChanged := PodChanged(oldPod, newPod, endpointChanged)
+	podChanged, labelsChanged := podEndpointsChanged(oldPod, newPod)

 	// If both the pod and labels are unchanged, no update is needed
 	if !podChanged && !labelsChanged {
@@ -266,3 +277,18 @@ func (sl portsInOrder) Less(i, j int) bool {
 	h2 := DeepHashObjectToString(sl[j])
 	return h1 < h2
 }

+// IsIPv6Service checks if svc should have IPv6 endpoints
+func IsIPv6Service(svc *v1.Service) bool {
+	if helper.IsServiceIPSet(svc) {
+		return utilnet.IsIPv6String(svc.Spec.ClusterIP)
+	} else if svc.Spec.IPFamily != nil {
+		return *svc.Spec.IPFamily == v1.IPv6Protocol
+	} else {
+		// FIXME: for legacy headless Services with no IPFamily, the current
+		// thinking is that we should use the cluster default. Unfortunately
+		// the endpoint controller doesn't know the cluster default. For now,
+		// assume it's IPv4.
+		return false
+	}
+}
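IsIPv6Service resolves the address family in a fixed order: a set ClusterIP wins, then Spec.IPFamily, then an assumed IPv4 default for legacy headless Services. A usage sketch; the endpointutil import alias matches how the controllers above import this package:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
)

func main() {
	ipv6 := v1.IPv6Protocol

	withIP := &v1.Service{Spec: v1.ServiceSpec{ClusterIP: "fd00::1"}}
	headless := &v1.Service{Spec: v1.ServiceSpec{ClusterIP: v1.ClusterIPNone, IPFamily: &ipv6}}
	legacy := &v1.Service{Spec: v1.ServiceSpec{ClusterIP: v1.ClusterIPNone}}

	fmt.Println(endpointutil.IsIPv6Service(withIP))   // true: family read from ClusterIP
	fmt.Println(endpointutil.IsIPv6Service(headless)) // true: falls back to Spec.IPFamily
	fmt.Println(endpointutil.IsIPv6Service(legacy))   // false: assumed cluster default of IPv4
}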


@@ -61,6 +61,8 @@ const (
 	ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
 	// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
 	ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict"
+	// ErrUnboundImmediatePVC is used when the pod has an unbound PVC in immediate binding mode.
+	ErrUnboundImmediatePVC ConflictReason = "pod has unbound immediate PersistentVolumeClaims"
 )

 // InTreeToCSITranslator contains methods required to check migratable status
@@ -258,7 +260,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons Confl
 	// Immediate claims should be bound
 	if len(unboundClaimsImmediate) > 0 {
-		return nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
+		return ConflictReasons{ErrUnboundImmediatePVC}, nil
 	}

 	// Check PV node affinity on bound volumes
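For callers of FindPodVolumes, an unbound immediate-binding PVC is now an ordinary unschedulable reason rather than a scheduler error. A self-contained sketch of what that shift means downstream; the types mirror the ones above and the control flow is illustrative:

package main

import "fmt"

type ConflictReason string

type ConflictReasons []ConflictReason

const ErrUnboundImmediatePVC ConflictReason = "pod has unbound immediate PersistentVolumeClaims"

func main() {
	// Previously this case produced (nil, error); now it is (reasons, nil).
	reasons, err := ConflictReasons{ErrUnboundImmediatePVC}, error(nil)
	if err != nil {
		fmt.Println("scheduler error, abort") // no longer taken for this case
		return
	}
	for _, r := range reasons {
		fmt.Println("unschedulable:", r) // surfaced as a normal filter failure
	}
}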


@@ -8,6 +8,7 @@ approvers:
 - vishh
 - yujuhong
 - dashpole
+- sjenning
 reviewers:
 - sig-node-reviewers
 labels:


@@ -1,4 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- sjenning


@@ -8,7 +8,6 @@ approvers:
 - vishh
 - yujuhong
 - ConnorDoyle
-- sjenning
 - klueska
 reviewers:
 - sig-node-reviewers


@@ -4,7 +4,6 @@ approvers:
 - derekwaynecarr
 - vishh
 - ConnorDoyle
-- sjenning
 - balajismaniam
 reviewers:
 - klueska


@@ -4,4 +4,3 @@ approvers:
 - derekwaynecarr
 - vishh
 - ConnorDoyle
-- sjenning


@@ -38,6 +38,9 @@ type OSInterface interface {
 	Pipe() (r *os.File, w *os.File, err error)
 	ReadDir(dirname string) ([]os.FileInfo, error)
 	Glob(pattern string) ([]string, error)
+	Open(name string) (*os.File, error)
+	OpenFile(name string, flag int, perm os.FileMode) (*os.File, error)
+	Rename(oldpath, newpath string) error
 }

 // RealOS is used to dispatch the real system level operations.
@@ -105,3 +108,18 @@ func (RealOS) ReadDir(dirname string) ([]os.FileInfo, error) {
 func (RealOS) Glob(pattern string) ([]string, error) {
 	return filepath.Glob(pattern)
 }
+
+// Open will call os.Open to return the file.
+func (RealOS) Open(name string) (*os.File, error) {
+	return os.Open(name)
+}
+
+// OpenFile will call os.OpenFile to return the file.
+func (RealOS) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
+	return os.OpenFile(name, flag, perm)
+}
+
+// Rename will call os.Rename to rename a file.
+func (RealOS) Rename(oldpath, newpath string) error {
+	return os.Rename(oldpath, newpath)
+}
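A plausible motivation for widening OSInterface is testability: code that goes through the interface, like the log manager further below, can be pointed at a fake filesystem. A partial sketch of such a fake, implementing only the three new methods rather than the full interface:

package main

import (
	"fmt"
	"os"
)

// recordingOS records calls instead of touching the real filesystem.
type recordingOS struct{ ops []string }

func (r *recordingOS) Open(name string) (*os.File, error) {
	r.ops = append(r.ops, "open "+name)
	return nil, os.ErrNotExist // no real file behind the fake
}

func (r *recordingOS) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
	r.ops = append(r.ops, fmt.Sprintf("openfile %s %o", name, perm))
	return nil, os.ErrNotExist
}

func (r *recordingOS) Rename(oldpath, newpath string) error {
	r.ops = append(r.ops, "rename "+oldpath+" -> "+newpath)
	return nil
}

func main() {
	f := &recordingOS{}
	_ = f.Rename("/var/log/a.log", "/var/log/a.log.20200917")
	_, _ = f.Open("/var/log/a.log.20200917")
	fmt.Println(f.ops)
}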


@@ -81,6 +81,25 @@ func (ds *dockerService) ListContainers(_ context.Context, r *runtimeapi.ListCon
 	return &runtimeapi.ListContainersResponse{Containers: result}, nil
 }

+func (ds *dockerService) getContainerCleanupInfo(containerID string) (*containerCleanupInfo, bool) {
+	ds.cleanupInfosLock.RLock()
+	defer ds.cleanupInfosLock.RUnlock()
+	info, ok := ds.containerCleanupInfos[containerID]
+	return info, ok
+}
+
+func (ds *dockerService) setContainerCleanupInfo(containerID string, info *containerCleanupInfo) {
+	ds.cleanupInfosLock.Lock()
+	defer ds.cleanupInfosLock.Unlock()
+	ds.containerCleanupInfos[containerID] = info
+}
+
+func (ds *dockerService) clearContainerCleanupInfo(containerID string) {
+	ds.cleanupInfosLock.Lock()
+	defer ds.cleanupInfosLock.Unlock()
+	delete(ds.containerCleanupInfos, containerID)
+}
+
 // CreateContainer creates a new container in the given PodSandbox
 // Docker cannot store the log to an arbitrary location (yet), so we create an
 // symlink at LogPath, linking to the actual path of the log.
@@ -183,7 +202,7 @@ func (ds *dockerService) CreateContainer(_ context.Context, r *runtimeapi.Create
 		// we don't perform the clean up just yet as that could destroy information
 		// needed for the container to start (e.g. Windows credentials stored in
 		// registry keys); instead, we'll clean up when the container gets removed
-		ds.containerCleanupInfos[containerID] = cleanupInfo
+		ds.setContainerCleanupInfo(containerID, cleanupInfo)
 	}
 	return &runtimeapi.CreateContainerResponse{ContainerId: containerID}, nil
 }
@@ -459,11 +478,11 @@ func (ds *dockerService) UpdateContainerResources(_ context.Context, r *runtimea
 }

 func (ds *dockerService) performPlatformSpecificContainerForContainer(containerID string) (errors []error) {
-	if cleanupInfo, present := ds.containerCleanupInfos[containerID]; present {
+	if cleanupInfo, present := ds.getContainerCleanupInfo(containerID); present {
 		errors = ds.performPlatformSpecificContainerCleanupAndLogErrors(containerID, cleanupInfo)
 		if len(errors) == 0 {
-			delete(ds.containerCleanupInfos, containerID)
+			ds.clearContainerCleanupInfo(containerID)
 		}
 	}
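Go maps are unsafe for concurrent use, and CreateContainer and container removal can run on different goroutines, so all access to containerCleanupInfos now funnels through RWMutex-guarded accessors. A minimal sketch of the pattern with simplified stand-in types:

package main

import (
	"fmt"
	"sync"
)

type cleanupInfo struct{}

type service struct {
	mu    sync.RWMutex
	infos map[string]*cleanupInfo
}

func (s *service) get(id string) (*cleanupInfo, bool) {
	s.mu.RLock() // readers may proceed in parallel
	defer s.mu.RUnlock()
	info, ok := s.infos[id]
	return info, ok
}

func (s *service) set(id string, info *cleanupInfo) {
	s.mu.Lock() // writers are exclusive
	defer s.mu.Unlock()
	s.infos[id] = info
}

func main() {
	s := &service{infos: map[string]*cleanupInfo{}}
	s.set("c1", &cleanupInfo{})
	_, ok := s.get("c1")
	fmt.Println(ok) // true
}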


@@ -29,7 +29,7 @@ import (
 	dockertypes "github.com/docker/docker/api/types"
 	"k8s.io/klog"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
@@ -313,6 +313,7 @@ type dockerService struct {
 	// (see `applyPlatformSpecificDockerConfig` and `performPlatformSpecificContainerCleanup`
 	// methods for more info).
 	containerCleanupInfos map[string]*containerCleanupInfo
+	cleanupInfosLock      sync.RWMutex
 }

 // TODO: handle context.


@@ -1,4 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- sjenning


@@ -1,4 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- sjenning


@@ -680,6 +680,22 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.KubeClient)
 	}

+	if containerRuntime == kubetypes.RemoteContainerRuntime && utilfeature.DefaultFeatureGate.Enabled(features.CRIContainerLogRotation) {
+		// setup containerLogManager for CRI container runtime
+		containerLogManager, err := logs.NewContainerLogManager(
+			klet.runtimeService,
+			kubeDeps.OSInterface,
+			kubeCfg.ContainerLogMaxSize,
+			int(kubeCfg.ContainerLogMaxFiles),
+		)
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
+		}
+		klet.containerLogManager = containerLogManager
+	} else {
+		klet.containerLogManager = logs.NewStubContainerLogManager()
+	}
+
 	runtime, err := kuberuntime.NewKubeGenericRuntimeManager(
 		kubecontainer.FilterEventRecorder(kubeDeps.Recorder),
 		klet.livenessManager,
@@ -701,6 +717,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		kubeDeps.RemoteImageService,
 		kubeDeps.ContainerManager.InternalContainerLifecycle(),
 		kubeDeps.dockerLegacyService,
+		klet.containerLogManager,
 		klet.runtimeClassManager,
 	)
 	if err != nil {
@@ -758,21 +775,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	}
 	klet.imageManager = imageManager

-	if containerRuntime == kubetypes.RemoteContainerRuntime && utilfeature.DefaultFeatureGate.Enabled(features.CRIContainerLogRotation) {
-		// setup containerLogManager for CRI container runtime
-		containerLogManager, err := logs.NewContainerLogManager(
-			klet.runtimeService,
-			kubeCfg.ContainerLogMaxSize,
-			int(kubeCfg.ContainerLogMaxFiles),
-		)
-		if err != nil {
-			return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
-		}
-		klet.containerLogManager = containerLogManager
-	} else {
-		klet.containerLogManager = logs.NewStubContainerLogManager()
-	}
-
 	if kubeCfg.ServerTLSBootstrap && kubeDeps.TLSOptions != nil && utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
 		klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, klet.getLastObservedNodeAddresses, certDirectory)
 		if err != nil {
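The setup block moves above the NewKubeGenericRuntimeManager call because the runtime manager now receives the log manager as a constructor argument, so it must exist first. A toy sketch of that ordering constraint, with simplified stand-in types:

package main

import "fmt"

type logManager interface{ Start() }

type stubLogManager struct{}

func (stubLogManager) Start() {}

type runtimeManager struct{ logs logManager }

func newRuntimeManager(logs logManager) *runtimeManager {
	// The dependency is injected at construction time; constructing the
	// runtime manager before the log manager would hand it a nil value.
	return &runtimeManager{logs: logs}
}

func main() {
	lm := stubLogManager{} // built first, as in the reordered NewMainKubelet
	rm := newRuntimeManager(lm)
	fmt.Println(rm.logs != nil) // true
}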


@@ -40,6 +40,7 @@ go_library(
         "//pkg/kubelet/images:go_default_library",
         "//pkg/kubelet/kuberuntime/logs:go_default_library",
         "//pkg/kubelet/lifecycle:go_default_library",
+        "//pkg/kubelet/logs:go_default_library",
         "//pkg/kubelet/metrics:go_default_library",
         "//pkg/kubelet/prober/results:go_default_library",
         "//pkg/kubelet/runtimeclass:go_default_library",


@@ -31,6 +31,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
+	"k8s.io/kubernetes/pkg/kubelet/logs"
 	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/util/logreduction"
 )
@@ -73,6 +74,10 @@ func (f *fakePodStateProvider) IsPodTerminated(uid types.UID) bool {
 func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) {
 	recorder := &record.FakeRecorder{}
+	logManager, err := logs.NewContainerLogManager(runtimeService, osInterface, "1", 2)
+	if err != nil {
+		return nil, err
+	}
 	kubeRuntimeManager := &kubeGenericRuntimeManager{
 		recorder:            recorder,
 		cpuCFSQuota:         false,
@@ -89,6 +94,7 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
 		seccompProfileRoot:  fakeSeccompProfileRoot,
 		internalLifecycle:   cm.NewFakeInternalContainerLifecycle(),
 		logReduction:        logreduction.NewLogReduction(identicalErrorDelay),
+		logManager:          logManager,
 	}
 	typedVersion, err := runtimeService.Version(kubeRuntimeAPIVersion)


@@ -883,19 +883,19 @@ func (m *kubeGenericRuntimeManager) removeContainer(containerID string) error {
 // removeContainerLog removes the container log.
 func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error {
-	// Remove the container log.
+	// Use log manager to remove rotated logs.
+	err := m.logManager.Clean(containerID)
+	if err != nil {
+		return err
+	}
 	status, err := m.runtimeService.ContainerStatus(containerID)
 	if err != nil {
 		return fmt.Errorf("failed to get container status %q: %v", containerID, err)
 	}
-	labeledInfo := getContainerInfoFromLabels(status.Labels)
-	path := status.GetLogPath()
-	if err := m.osInterface.Remove(path); err != nil && !os.IsNotExist(err) {
-		return fmt.Errorf("failed to remove container %q log %q: %v", containerID, path, err)
-	}

 	// Remove the legacy container log symlink.
 	// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
+	labeledInfo := getContainerInfoFromLabels(status.Labels)
 	legacySymlink := legacyLogSymlink(containerID, labeledInfo.ContainerName, labeledInfo.PodName,
 		labeledInfo.PodNamespace)
 	if err := m.osInterface.Remove(legacySymlink); err != nil && !os.IsNotExist(err) {


@@ -45,6 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
+	"k8s.io/kubernetes/pkg/kubelet/logs"
 	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
 	"k8s.io/kubernetes/pkg/kubelet/types"
@@ -128,6 +129,9 @@ type kubeGenericRuntimeManager struct {
 	// A shim to legacy functions for backward compatibility.
 	legacyLogProvider LegacyLogProvider

+	// Manage container logs.
+	logManager logs.ContainerLogManager
+
 	// Manage RuntimeClass resources.
 	runtimeClassManager *runtimeclass.Manager
@@ -170,6 +174,7 @@ func NewKubeGenericRuntimeManager(
 	imageService internalapi.ImageManagerService,
 	internalLifecycle cm.InternalContainerLifecycle,
 	legacyLogProvider LegacyLogProvider,
+	logManager logs.ContainerLogManager,
 	runtimeClassManager *runtimeclass.Manager,
 ) (KubeGenericRuntime, error) {
 	kubeRuntimeManager := &kubeGenericRuntimeManager{
@@ -188,6 +193,7 @@ func NewKubeGenericRuntimeManager(
 		keyring:             credentialprovider.NewDockerKeyring(),
 		internalLifecycle:   internalLifecycle,
 		legacyLogProvider:   legacyLogProvider,
+		logManager:          logManager,
 		runtimeClassManager: runtimeClassManager,
 		logReduction:        logreduction.NewLogReduction(identicalErrorDelay),
 	}


@@ -9,6 +9,7 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/kubelet/logs",
     visibility = ["//visibility:public"],
     deps = [
+        "//pkg/kubelet/container:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
@@ -23,6 +24,7 @@ go_test(
     srcs = ["container_log_manager_test.go"],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/kubelet/container:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library",
        "//staging/src/k8s.io/cri-api/pkg/apis/testing:go_default_library",


@@ -24,6 +24,7 @@ import (
 	"path/filepath"
 	"sort"
 	"strings"
+	"sync"
 	"time"

 	"k8s.io/klog"
@@ -33,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	internalapi "k8s.io/cri-api/pkg/apis"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )

 const (
@@ -55,6 +57,8 @@ type ContainerLogManager interface {
 	// TODO(random-liu): Add RotateLogs function and call it under disk pressure.
 	// Start container log manager.
 	Start()
+	// Clean removes all logs of specified container.
+	Clean(containerID string) error
 }

 // LogRotatePolicy is a policy for container log rotation. The policy applies to all
@@ -142,12 +146,14 @@ func parseMaxSize(size string) (int64, error) {
 type containerLogManager struct {
 	runtimeService internalapi.RuntimeService
+	osInterface    kubecontainer.OSInterface
 	policy         LogRotatePolicy
 	clock          clock.Clock
+	mutex          sync.Mutex
 }

 // NewContainerLogManager creates a new container log manager.
-func NewContainerLogManager(runtimeService internalapi.RuntimeService, maxSize string, maxFiles int) (ContainerLogManager, error) {
+func NewContainerLogManager(runtimeService internalapi.RuntimeService, osInterface kubecontainer.OSInterface, maxSize string, maxFiles int) (ContainerLogManager, error) {
 	if maxFiles <= 1 {
 		return nil, fmt.Errorf("invalid MaxFiles %d, must be > 1", maxFiles)
 	}
@@ -157,12 +163,14 @@ func NewContainerLogManager(runtimeService internalapi.RuntimeService, maxSize s
 	}
 	// policy LogRotatePolicy
 	return &containerLogManager{
+		osInterface:    osInterface,
 		runtimeService: runtimeService,
 		policy: LogRotatePolicy{
 			MaxSize:  parsedMaxSize,
 			MaxFiles: maxFiles,
 		},
 		clock: clock.RealClock{},
+		mutex: sync.Mutex{},
 	}, nil
 }
@@ -176,7 +184,32 @@ func (c *containerLogManager) Start() {
 	}, logMonitorPeriod)
 }

+// Clean removes all logs of specified container (including rotated one).
+func (c *containerLogManager) Clean(containerID string) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	status, err := c.runtimeService.ContainerStatus(containerID)
+	if err != nil {
+		return fmt.Errorf("failed to get container status %q: %v", containerID, err)
+	}
+	pattern := fmt.Sprintf("%s*", status.GetLogPath())
+	logs, err := c.osInterface.Glob(pattern)
+	if err != nil {
+		return fmt.Errorf("failed to list all log files with pattern %q: %v", pattern, err)
+	}
+	for _, l := range logs {
+		if err := c.osInterface.Remove(l); err != nil && !os.IsNotExist(err) {
+			return fmt.Errorf("failed to remove container %q log %q: %v", containerID, l, err)
+		}
+	}
+	return nil
+}
+
 func (c *containerLogManager) rotateLogs() error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
 	// TODO(#59998): Use kubelet pod cache.
 	containers, err := c.runtimeService.ListContainers(&runtimeapi.ContainerFilter{})
 	if err != nil {
@@ -197,7 +230,7 @@ func (c *containerLogManager) rotateLogs() error {
 			continue
 		}
 		path := status.GetLogPath()
-		info, err := os.Stat(path)
+		info, err := c.osInterface.Stat(path)
 		if err != nil {
 			if !os.IsNotExist(err) {
 				klog.Errorf("Failed to stat container log %q: %v", path, err)
@@ -211,7 +244,7 @@ func (c *containerLogManager) rotateLogs() error {
 			continue
 		}
 		// The container log should be recovered.
-		info, err = os.Stat(path)
+		info, err = c.osInterface.Stat(path)
 		if err != nil {
 			klog.Errorf("Failed to stat container log %q after reopen: %v", path, err)
 			continue
@@ -269,7 +302,7 @@ func (c *containerLogManager) rotateLog(id, log string) error {
 func (c *containerLogManager) cleanupUnusedLogs(logs []string) ([]string, error) {
 	inuse, unused := filterUnusedLogs(logs)
 	for _, l := range unused {
-		if err := os.Remove(l); err != nil {
+		if err := c.osInterface.Remove(l); err != nil {
 			return nil, fmt.Errorf("failed to remove unused log %q: %v", l, err)
 		}
 	}
@@ -322,7 +355,7 @@ func (c *containerLogManager) removeExcessLogs(logs []string) ([]string, error)
 	}
 	i := 0
 	for ; i < len(logs)-maxRotatedFiles; i++ {
-		if err := os.Remove(logs[i]); err != nil {
+		if err := c.osInterface.Remove(logs[i]); err != nil {
 			return nil, fmt.Errorf("failed to remove old log %q: %v", logs[i], err)
 		}
 	}
@@ -332,19 +365,19 @@ func (c *containerLogManager) removeExcessLogs(logs []string) ([]string, error)
 // compressLog compresses a log to log.gz with gzip.
 func (c *containerLogManager) compressLog(log string) error {
-	r, err := os.Open(log)
+	r, err := c.osInterface.Open(log)
 	if err != nil {
 		return fmt.Errorf("failed to open log %q: %v", log, err)
 	}
 	defer r.Close()
 	tmpLog := log + tmpSuffix
-	f, err := os.OpenFile(tmpLog, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+	f, err := c.osInterface.OpenFile(tmpLog, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
 	if err != nil {
 		return fmt.Errorf("failed to create temporary log %q: %v", tmpLog, err)
 	}
 	defer func() {
 		// Best effort cleanup of tmpLog.
-		os.Remove(tmpLog)
+		c.osInterface.Remove(tmpLog)
 	}()
 	defer f.Close()
 	w := gzip.NewWriter(f)
@@ -353,11 +386,11 @@ func (c *containerLogManager) compressLog(log string) error {
 		return fmt.Errorf("failed to compress %q to %q: %v", log, tmpLog, err)
 	}
 	compressedLog := log + compressSuffix
-	if err := os.Rename(tmpLog, compressedLog); err != nil {
+	if err := c.osInterface.Rename(tmpLog, compressedLog); err != nil {
 		return fmt.Errorf("failed to rename %q to %q: %v", tmpLog, compressedLog, err)
 	}
 	// Remove old log file.
-	if err := os.Remove(log); err != nil {
+	if err := c.osInterface.Remove(log); err != nil {
 		return fmt.Errorf("failed to remove log %q after compress: %v", log, err)
 	}
 	return nil
@@ -368,14 +401,14 @@ func (c *containerLogManager) compressLog(log string) error {
 func (c *containerLogManager) rotateLatestLog(id, log string) error {
 	timestamp := c.clock.Now().Format(timestampFormat)
 	rotated := fmt.Sprintf("%s.%s", log, timestamp)
-	if err := os.Rename(log, rotated); err != nil {
+	if err := c.osInterface.Rename(log, rotated); err != nil {
 		return fmt.Errorf("failed to rotate log %q to %q: %v", log, rotated, err)
 	}
 	if err := c.runtimeService.ReopenContainerLog(id); err != nil {
 		// Rename the rotated log back, so that we can try rotating it again
 		// next round.
 		// If kubelet gets restarted at this point, we'll lose original log.
-		if renameErr := os.Rename(rotated, log); renameErr != nil {
+		if renameErr := c.osInterface.Rename(rotated, log); renameErr != nil {
 			// This shouldn't happen.
 			// Report an error if this happens, because we will lose original
 			// log.
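Clean relies on the rotation naming scheme: rotated files share the live log's path as a prefix, so the single glob "<path>*" matches the live log plus every rotation and compressed rotation. A standalone illustration with hypothetical paths; filepath.Match applies the same syntax Glob uses per path element:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	pattern := "/var/log/pods/ns_pod_uid/ctr/0.log" + "*"
	for _, name := range []string{
		"/var/log/pods/ns_pod_uid/ctr/0.log",
		"/var/log/pods/ns_pod_uid/ctr/0.log.20200917-000000",
		"/var/log/pods/ns_pod_uid/ctr/0.log.20200917-000000.gz",
		"/var/log/pods/ns_pod_uid/ctr/1.log",
	} {
		ok, _ := filepath.Match(pattern, name)
		fmt.Println(ok, name) // true for the first three, false for 1.log
	}
}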


@@ -20,6 +20,10 @@ type containerLogManagerStub struct{}

 func (*containerLogManagerStub) Start() {}

+func (*containerLogManagerStub) Clean(containerID string) error {
+	return nil
+}
+
 // NewStubContainerLogManager returns an empty ContainerLogManager which does nothing.
 func NewStubContainerLogManager() ContainerLogManager {
 	return &containerLogManagerStub{}


@@ -1,4 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- sjenning


@@ -1,4 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- sjenning


@@ -655,6 +655,16 @@ func listWithMoreString(list []string, more bool, count, max int) string {
 	return ret
 }

+// translateMicroTimestampSince returns the elapsed time since timestamp in
+// human-readable approximation.
+func translateMicroTimestampSince(timestamp metav1.MicroTime) string {
+	if timestamp.IsZero() {
+		return "<unknown>"
+	}
+
+	return duration.HumanDuration(time.Since(timestamp.Time))
+}
+
 // translateTimestampSince returns the elapsed time since timestamp in
 // human-readable approximation.
 func translateTimestampSince(timestamp metav1.Time) string {
@@ -1660,6 +1670,9 @@ func printEvent(obj *api.Event, options printers.GenerateOptions) ([]metav1.Tabl
 	}
 	firstTimestamp := translateTimestampSince(obj.FirstTimestamp)
+	if obj.FirstTimestamp.IsZero() {
+		firstTimestamp = translateMicroTimestampSince(obj.EventTime)
+	}
 	lastTimestamp := translateTimestampSince(obj.LastTimestamp)
 	if obj.LastTimestamp.IsZero() {
 		lastTimestamp = firstTimestamp
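Events created through the newer events API set only EventTime, a metav1.MicroTime, so FirstTimestamp is zero and the printer now falls back to it. A small sketch of the fallback; sinceMicro mirrors translateMicroTimestampSince above:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/duration"
)

func sinceMicro(ts metav1.MicroTime) string {
	if ts.IsZero() {
		return "<unknown>"
	}
	return duration.HumanDuration(time.Since(ts.Time))
}

func main() {
	var legacy metav1.Time // zero: the event came from the new events API
	eventTime := metav1.NewMicroTime(time.Now().Add(-90 * time.Second))
	if legacy.IsZero() {
		fmt.Println(sinceMicro(eventTime)) // e.g. "90s" instead of "<unknown>"
	}
}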


@@ -87,7 +87,6 @@ go_test(
         "//staging/src/k8s.io/api/events/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",


@@ -347,7 +347,7 @@ func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework
 	// incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity
 	incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, allNodes)
 	if err != nil {
-		return framework.NewStatus(framework.Error, fmt.Sprintf("calculating preFilterState: %v", err))
+		return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("calculating preFilterState: %v", err))
 	}

 	s := &preFilterState{


@@ -203,14 +203,12 @@ func (pl *InterPodAffinity) PreScore(
 	var antiAffinityTerms []*weightedAffinityTerm
 	if hasAffinityConstraints {
 		if affinityTerms, err = getWeightedAffinityTerms(pod, affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution); err != nil {
-			klog.Error(err)
-			return nil
+			return framework.NewStatus(framework.Error, fmt.Sprintf("processing affinity terms: %+v", err))
 		}
 	}
 	if hasAntiAffinityConstraints {
 		if antiAffinityTerms, err = getWeightedAffinityTerms(pod, affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution); err != nil {
-			klog.Error(err)
-			return nil
+			return framework.NewStatus(framework.Error, fmt.Sprintf("processing affinity terms: %+v", err))
 		}
 	}


@ -22,13 +22,11 @@ import (
"time" "time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog" "k8s.io/klog"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/metrics"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
@ -316,12 +314,9 @@ func (cache *schedulerCache) removeDeletedNodesFromSnapshot(snapshot *Snapshot)
} }
} }
func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) { // PodCount returns the number of pods in the cache (including those from deleted nodes).
alwaysTrue := func(p *v1.Pod) bool { return true } // DO NOT use outside of tests.
return cache.FilteredList(alwaysTrue, selector) func (cache *schedulerCache) PodCount() (int, error) {
}
func (cache *schedulerCache) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
cache.mu.RLock() cache.mu.RLock()
defer cache.mu.RUnlock() defer cache.mu.RUnlock()
// podFilter is expected to return true for most or all of the pods. We // podFilter is expected to return true for most or all of the pods. We
@ -331,15 +326,11 @@ func (cache *schedulerCache) FilteredList(podFilter schedulerlisters.PodFilter,
for _, n := range cache.nodes { for _, n := range cache.nodes {
maxSize += len(n.info.Pods()) maxSize += len(n.info.Pods())
} }
pods := make([]*v1.Pod, 0, maxSize) count := 0
for _, n := range cache.nodes { for _, n := range cache.nodes {
for _, pod := range n.info.Pods() { count += len(n.info.Pods())
if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) {
pods = append(pods, pod)
}
}
} }
return pods, nil return count, nil
} }
func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { func (cache *schedulerCache) AssumePod(pod *v1.Pod) error {
@@ -429,13 +420,6 @@ func (cache *schedulerCache) addPod(pod *v1.Pod) {
 // Assumes that lock is already acquired.
 func (cache *schedulerCache) updatePod(oldPod, newPod *v1.Pod) error {
-	if _, ok := cache.nodes[newPod.Spec.NodeName]; !ok {
-		// The node might have been deleted already.
-		// This is not a problem in the case where a pod update arrives before the
-		// node creation, because we will always have a create pod event before
-		// that, which will create the placeholder node item.
-		return nil
-	}
 	if err := cache.removePod(oldPod); err != nil {
 		return err
 	}
@@ -444,18 +428,23 @@ func (cache *schedulerCache) updatePod(oldPod, newPod *v1.Pod) error {
 }
 
 // Assumes that lock is already acquired.
-// Removes a pod from the cached node info. When a node is removed, some pod
-// deletion events might arrive later. This is not a problem, as the pods in
-// the node are assumed to be removed already.
+// Removes a pod from the cached node info. If the node information was already
+// removed and there are no more pods left in the node, cleans up the node from
+// the cache.
 func (cache *schedulerCache) removePod(pod *v1.Pod) error {
 	n, ok := cache.nodes[pod.Spec.NodeName]
 	if !ok {
+		klog.Errorf("node %v not found when trying to remove pod %v", pod.Spec.NodeName, pod.Name)
 		return nil
 	}
 	if err := n.info.RemovePod(pod); err != nil {
 		return err
 	}
-	cache.moveNodeInfoToHead(pod.Spec.NodeName)
+	if len(n.info.Pods()) == 0 && n.info.Node() == nil {
+		cache.removeNodeInfoFromList(pod.Spec.NodeName)
+	} else {
+		cache.moveNodeInfoToHead(pod.Spec.NodeName)
+	}
 	return nil
 }
@@ -625,21 +614,30 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error {
 	return n.info.SetNode(newNode)
 }
 
-// RemoveNode removes a node from the cache.
-// Some nodes might still have pods because their deletion events didn't arrive
-// yet. For most intents and purposes, those pods are removed from the cache,
-// having it's source of truth in the cached nodes.
-// However, some information on pods (assumedPods, podStates) persist. These
-// caches will be eventually consistent as pod deletion events arrive.
+// RemoveNode removes a node from the cache's tree.
+// The node might still have pods because their deletion events didn't arrive
+// yet. Those pods are considered removed from the cache, being the node tree
+// the source of truth.
+// However, we keep a ghost node with the list of pods until all pod deletion
+// events have arrived. A ghost node is skipped from snapshots.
 func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
 	cache.mu.Lock()
 	defer cache.mu.Unlock()
-	_, ok := cache.nodes[node.Name]
+	n, ok := cache.nodes[node.Name]
 	if !ok {
 		return fmt.Errorf("node %v is not found", node.Name)
 	}
-	cache.removeNodeInfoFromList(node.Name)
+	n.info.RemoveNode()
+	// We remove NodeInfo for this node only if there aren't any pods on this node.
+	// We can't do it unconditionally, because notifications about pods are delivered
+	// in a different watch, and thus can potentially be observed later, even though
+	// they happened before node removal.
+	if len(n.info.Pods()) == 0 {
+		cache.removeNodeInfoFromList(node.Name)
+	} else {
+		cache.moveNodeInfoToHead(node.Name)
+	}
 	if err := cache.nodeTree.removeNode(node); err != nil {
 		return err
 	}
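
The ghost-node bookkeeping above exists because pod and node events arrive on separate watches, so a pod's delete event can legitimately trail its node's delete event. A sketch of the sequence being handled (calls illustrative):

	cache.RemoveNode(node) // pod delete not seen yet: NodeInfo becomes a "ghost"
	                       // (Node() == nil, pods > 0), skipped by snapshots
	cache.RemovePod(pod)   // last pod gone: removePod drops the ghost NodeInfo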
@@ -742,19 +740,6 @@ func (cache *schedulerCache) expirePod(key string, ps *podState) error {
 	return nil
 }
 
-// GetNodeInfo returns cached data for the node name.
-func (cache *schedulerCache) GetNodeInfo(nodeName string) (*v1.Node, error) {
-	cache.mu.RLock()
-	defer cache.mu.RUnlock()
-	n, ok := cache.nodes[nodeName]
-	if !ok {
-		return nil, fmt.Errorf("node %q not found in cache", nodeName)
-	}
-	return n.info.Node(), nil
-}
-
 // updateMetrics updates cache size metric values for pods, assumed pods, and nodes
 func (cache *schedulerCache) updateMetrics() {
 	metrics.CacheSize.WithLabelValues("assumed_pods").Set(float64(len(cache.assumedPods)))


@@ -45,8 +45,8 @@ func (d *CacheDumper) DumpAll() {
 func (d *CacheDumper) dumpNodes() {
 	dump := d.cache.Dump()
 	klog.Info("Dump of cached NodeInfo")
-	for _, nodeInfo := range dump.Nodes {
-		klog.Info(d.printNodeInfo(nodeInfo))
+	for name, nodeInfo := range dump.Nodes {
+		klog.Info(d.printNodeInfo(name, nodeInfo))
 	}
 }
@@ -61,16 +61,16 @@ func (d *CacheDumper) dumpSchedulingQueue() {
 }
 
 // printNodeInfo writes parts of NodeInfo to a string.
-func (d *CacheDumper) printNodeInfo(n *schedulernodeinfo.NodeInfo) string {
+func (d *CacheDumper) printNodeInfo(name string, n *schedulernodeinfo.NodeInfo) string {
 	var nodeData strings.Builder
-	nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n",
-		n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods())))
+	nodeData.WriteString(fmt.Sprintf("\nNode name: %s\nDeleted: %t\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n",
+		name, n.Node() == nil, n.RequestedResource(), n.AllocatableResource(), len(n.Pods())))
 	// Dumping Pod Info
 	for _, p := range n.Pods() {
 		nodeData.WriteString(printPod(p))
 	}
 	// Dumping nominated pods info on the node
-	nominatedPods := d.podQueue.NominatedPodsForNode(n.Node().Name)
+	nominatedPods := d.podQueue.NominatedPodsForNode(name)
 	if len(nominatedPods) != 0 {
 		nodeData.WriteString(fmt.Sprintf("Nominated Pods(number: %v):\n", len(nominatedPods)))
 		for _, p := range nominatedPods {
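
Passing the map key into printNodeInfo means the dumper no longer dereferences n.Node(), which is nil for ghost nodes, and the dump now flags them explicitly. Illustrative output shape (field values made up; the line layout follows the format string above):

	Node name: worker-1
	Deleted: true
	Requested Resources: {MilliCPU:200 Memory:419430400 ...}
	Allocatable Resources:{MilliCPU:2000 Memory:8052965376 ...}
	Scheduled Pods(number: 1):
	...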


@@ -18,7 +18,6 @@ package cache
 import (
 	v1 "k8s.io/api/core/v1"
-	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -57,7 +56,8 @@ import (
 // - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue,
 //   a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache.
 type Cache interface {
-	schedulerlisters.PodLister
+	// PodCount returns the number of pods in the cache (including those from deleted nodes).
+	PodCount() (int, error)
 	// AssumePod assumes a pod scheduled and aggregates the pod's information into its node.
 	// The implementation also decides the policy to expire pod before being confirmed (receiving Add event).


@@ -646,6 +646,12 @@ func (n *NodeInfo) SetNode(node *v1.Node) error {
 	return nil
 }
 
+// RemoveNode removes the node object, leaving all other tracking information.
+func (n *NodeInfo) RemoveNode() {
+	n.node = nil
+	n.generation = nextGeneration()
+}
+
 // FilterOutPods receives a list of pods and filters out those whose node names
 // are equal to the node of this NodeInfo, but are not found in the pods of this NodeInfo.
 //


@@ -71,12 +71,16 @@ var maxDataDiskCountMap = map[string]int64{
 	"STANDARD_D15_V2": 64,
 	"STANDARD_D16AS_V4": 32,
 	"STANDARD_D16A_V4": 32,
+	"STANDARD_D16DS_V4": 32,
+	"STANDARD_D16D_V4": 32,
 	"STANDARD_D16S_V3": 32,
 	"STANDARD_D16_V3": 32,
 	"STANDARD_D1_V2": 4,
 	"STANDARD_D2": 8,
 	"STANDARD_D2AS_V4": 4,
 	"STANDARD_D2A_V4": 4,
+	"STANDARD_D2DS_V4": 4,
+	"STANDARD_D2D_V4": 4,
 	"STANDARD_D2S_V3": 4,
 	"STANDARD_D2_V2": 8,
 	"STANDARD_D2_V2_PROMO": 8,
@@ -84,6 +88,8 @@ var maxDataDiskCountMap = map[string]int64{
 	"STANDARD_D3": 16,
 	"STANDARD_D32AS_V4": 32,
 	"STANDARD_D32A_V4": 32,
+	"STANDARD_D32DS_V4": 32,
+	"STANDARD_D32D_V4": 32,
 	"STANDARD_D32S_V3": 32,
 	"STANDARD_D32_V3": 32,
 	"STANDARD_D3_V2": 16,
@@ -91,10 +97,14 @@ var maxDataDiskCountMap = map[string]int64{
 	"STANDARD_D4": 32,
 	"STANDARD_D48AS_V4": 32,
 	"STANDARD_D48A_V4": 32,
+	"STANDARD_D48DS_V4": 32,
+	"STANDARD_D48D_V4": 32,
 	"STANDARD_D48S_V3": 32,
 	"STANDARD_D48_V3": 32,
 	"STANDARD_D4AS_V4": 8,
 	"STANDARD_D4A_V4": 8,
+	"STANDARD_D4DS_V4": 8,
+	"STANDARD_D4D_V4": 8,
 	"STANDARD_D4S_V3": 8,
 	"STANDARD_D4_V2": 32,
 	"STANDARD_D4_V2_PROMO": 32,
@@ -103,10 +113,14 @@ var maxDataDiskCountMap = map[string]int64{
 	"STANDARD_D5_V2_PROMO": 64,
 	"STANDARD_D64AS_V4": 32,
 	"STANDARD_D64A_V4": 32,
+	"STANDARD_D64DS_V4": 32,
+	"STANDARD_D64D_V4": 32,
 	"STANDARD_D64S_V3": 32,
 	"STANDARD_D64_V3": 32,
 	"STANDARD_D8AS_V4": 16,
 	"STANDARD_D8A_V4": 16,
+	"STANDARD_D8DS_V4": 16,
+	"STANDARD_D8D_V4": 16,
 	"STANDARD_D8S_V3": 16,
 	"STANDARD_D8_V3": 16,
 	"STANDARD_D96AS_V4": 32,
@@ -150,47 +164,71 @@ var maxDataDiskCountMap = map[string]int64{
 	"STANDARD_DS4_V2_PROMO": 32,
 	"STANDARD_DS5_V2": 64,
 	"STANDARD_DS5_V2_PROMO": 64,
+	"STANDARD_E16-4DS_V4": 32,
 	"STANDARD_E16-4S_V3": 32,
+	"STANDARD_E16-8DS_V4": 32,
 	"STANDARD_E16-8S_V3": 32,
 	"STANDARD_E16AS_V4": 32,
 	"STANDARD_E16A_V4": 32,
+	"STANDARD_E16DS_V4": 32,
+	"STANDARD_E16D_V4": 32,
 	"STANDARD_E16S_V3": 32,
 	"STANDARD_E16_V3": 32,
 	"STANDARD_E20AS_V4": 32,
 	"STANDARD_E20A_V4": 32,
+	"STANDARD_E20DS_V4": 32,
+	"STANDARD_E20D_V4": 32,
 	"STANDARD_E20S_V3": 32,
 	"STANDARD_E20_V3": 32,
 	"STANDARD_E2AS_V4": 4,
 	"STANDARD_E2A_V4": 4,
+	"STANDARD_E2DS_V4": 4,
+	"STANDARD_E2D_V4": 4,
 	"STANDARD_E2S_V3": 4,
 	"STANDARD_E2_V3": 4,
+	"STANDARD_E32-16DS_V4": 32,
 	"STANDARD_E32-16S_V3": 32,
+	"STANDARD_E32-8DS_V4": 32,
 	"STANDARD_E32-8S_V3": 32,
 	"STANDARD_E32AS_V4": 32,
 	"STANDARD_E32A_V4": 32,
+	"STANDARD_E32DS_V4": 32,
+	"STANDARD_E32D_V4": 32,
 	"STANDARD_E32S_V3": 32,
 	"STANDARD_E32_V3": 32,
+	"STANDARD_E4-2DS_V4": 8,
 	"STANDARD_E4-2S_V3": 8,
 	"STANDARD_E48AS_V4": 32,
 	"STANDARD_E48A_V4": 32,
+	"STANDARD_E48DS_V4": 32,
+	"STANDARD_E48D_V4": 32,
 	"STANDARD_E48S_V3": 32,
 	"STANDARD_E48_V3": 32,
 	"STANDARD_E4AS_V4": 8,
 	"STANDARD_E4A_V4": 8,
+	"STANDARD_E4DS_V4": 8,
+	"STANDARD_E4D_V4": 8,
 	"STANDARD_E4S_V3": 8,
 	"STANDARD_E4_V3": 8,
+	"STANDARD_E64-16DS_V4": 32,
 	"STANDARD_E64-16S_V3": 32,
 	"STANDARD_E64-32S_V3": 32,
 	"STANDARD_E64AS_V4": 32,
 	"STANDARD_E64A_V4": 32,
+	"STANDARD_E64DS_V4": 32,
+	"STANDARD_E64D_V4": 32,
 	"STANDARD_E64IS_V3": 32,
 	"STANDARD_E64I_V3": 32,
 	"STANDARD_E64S_V3": 32,
 	"STANDARD_E64_V3": 32,
+	"STANDARD_E8-2DS_V4": 16,
 	"STANDARD_E8-2S_V3": 16,
+	"STANDARD_E8-4DS_V4": 16,
 	"STANDARD_E8-4S_V3": 16,
 	"STANDARD_E8AS_V4": 16,
 	"STANDARD_E8A_V4": 16,
+	"STANDARD_E8DS_V4": 16,
+	"STANDARD_E8D_V4": 16,
 	"STANDARD_E8S_V3": 16,
 	"STANDARD_E8_V3": 16,
 	"STANDARD_E96AS_V4": 32,
@@ -268,6 +306,8 @@ var maxDataDiskCountMap = map[string]int64{
 	"STANDARD_M32LS": 32,
 	"STANDARD_M32MS": 32,
 	"STANDARD_M32TS": 32,
+	"STANDARD_M416-208MS_V2": 64,
+	"STANDARD_M416-208S_V2": 64,
 	"STANDARD_M416MS_V2": 64,
 	"STANDARD_M416S_V2": 64,
 	"STANDARD_M64-16MS": 64,


@@ -69,7 +69,11 @@ func getPageSize(path string, mounter mount.Interface) (*resource.Quantity, erro
 			// NOTE: Adding suffix 'i' as result should be comparable with a medium size.
 			// pagesize mount option is specified without a suffix,
 			// e.g. pagesize=2M or pagesize=1024M for x86 CPUs
-			pageSize, err := resource.ParseQuantity(strings.TrimPrefix(opt, prefix) + "i")
+			trimmedOpt := strings.TrimPrefix(opt, prefix)
+			if !strings.HasSuffix(trimmedOpt, "i") {
+				trimmedOpt = trimmedOpt + "i"
+			}
+			pageSize, err := resource.ParseQuantity(trimmedOpt)
 			if err != nil {
 				return nil, fmt.Errorf("error getting page size from '%s' mount option: %v", opt, err)
 			}
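
Worked example of the guarded parse above (values illustrative): the old code appended "i" unconditionally, which would mangle a mount option that already carries the binary suffix.

	// pagesize=2M  -> trim prefix -> "2M"  -> no trailing "i" -> append -> "2Mi"
	// pagesize=2Mi -> trim prefix -> "2Mi" -> suffix already present -> unchanged
	// (the old unconditional append would have produced the unparsable "2Mii")
	opt, prefix := "pagesize=2M", "pagesize="
	trimmed := strings.TrimPrefix(opt, prefix)
	if !strings.HasSuffix(trimmed, "i") {
		trimmed += "i"
	}
	pageSize, err := resource.ParseQuantity(trimmed) // yields 2Mi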


@@ -23,6 +23,7 @@ import (
 	"syscall"
 
 	"os"
+	"time"
 
 	v1 "k8s.io/api/core/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -46,7 +47,10 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1
 	fsGroupPolicyEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConfigurableFSGroupPolicy)
 
-	klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath())
+	timer := time.AfterFunc(30*time.Second, func() {
+		klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath())
+	})
+	defer timer.Stop()
 
 	// This code exists for legacy purposes, so as old behaviour is entirely preserved when feature gate is disabled
 	// TODO: remove this when ConfigurableFSGroupPolicy turns GA.
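
The slow-chown warning now fires only when the operation is actually slow. A distilled sketch of the pattern (threshold and message mirror the change; surrounding names are illustrative):

	// Warn only if the work outlives the threshold; on the fast path the
	// deferred Stop() cancels the timer before its callback ever runs.
	timer := time.AfterFunc(30*time.Second, func() {
		klog.Warning("volume ownership change still running after 30s")
	})
	defer timer.Stop()
	// ... long-running recursive chown/chmod happens here ...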


@@ -251,6 +251,14 @@ const (
 	filterNodeLimit = 150
 )
 
+const (
+	// represents expected attachment status of a volume after attach
+	volumeAttachedStatus = "attached"
+	// represents expected attachment status of a volume after detach
+	volumeDetachedStatus = "detached"
+)
+
 // awsTagNameMasterRoles is a set of well-known AWS tag names that indicate the instance is a master
 // The major consequence is that it is then not considered for AWS zone discovery for dynamic volume creation.
 var awsTagNameMasterRoles = sets.NewString("kubernetes.io/role/master", "k8s.io/role/master")
@@ -1929,7 +1937,6 @@ func (c *Cloud) getMountDevice(
 		// AWS API returns consistent result next time (i.e. the volume is detached).
 		status := volumeStatus[mappingVolumeID]
 		klog.Warningf("Got assignment call for already-assigned volume: %s@%s, volume status: %s", mountDevice, mappingVolumeID, status)
-		return mountDevice, false, fmt.Errorf("volume is still being detached from the node")
 	}
 	return mountDevice, true, nil
 }
@@ -2130,7 +2137,7 @@ func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string)
 // waitForAttachmentStatus polls until the attachment status is the expected value
 // On success, it returns the last attachment state.
-func (d *awsDisk) waitForAttachmentStatus(status string, expectedInstance, expectedDevice string) (*ec2.VolumeAttachment, error) {
+func (d *awsDisk) waitForAttachmentStatus(status string, expectedInstance, expectedDevice string, alreadyAttached bool) (*ec2.VolumeAttachment, error) {
 	backoff := wait.Backoff{
 		Duration: volumeAttachmentStatusPollDelay,
 		Factor:   volumeAttachmentStatusFactor,
@@ -2155,7 +2162,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string, expectedInstance, expec
 		if err != nil {
 			// The VolumeNotFound error is special -- we don't need to wait for it to repeat
 			if isAWSErrorVolumeNotFound(err) {
-				if status == "detached" {
+				if status == volumeDetachedStatus {
 					// The disk doesn't exist, assume it's detached, log warning and stop waiting
 					klog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID)
 					stateStr := "detached"
@@ -2164,7 +2171,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string, expectedInstance, expec
 				}
 					return true, nil
 				}
-				if status == "attached" {
+				if status == volumeAttachedStatus {
 					// The disk doesn't exist, complain, give up waiting and report error
 					klog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID)
 					return false, err
@@ -2199,7 +2206,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string, expectedInstance, expec
 			}
 		}
 		if attachmentStatus == "" {
-			attachmentStatus = "detached"
+			attachmentStatus = volumeDetachedStatus
 		}
 		if attachment != nil {
 			// AWS eventual consistency can go back in time.
@@ -2228,6 +2235,13 @@ func (d *awsDisk) waitForAttachmentStatus(status string, expectedInstance, expec
 			}
 		}
 
+		// if we expected volume to be attached and it was reported as already attached via DescribeInstance call
+		// but DescribeVolume told us volume is detached, we will short-circuit this long wait loop and return error
+		// so as AttachDisk can be retried without waiting for 20 minutes.
+		if (status == volumeAttachedStatus) && alreadyAttached && (attachmentStatus != status) {
+			return false, fmt.Errorf("attachment of disk %q failed, expected device to be attached but was %s", d.name, attachmentStatus)
+		}
+
 		if attachmentStatus == status {
 			// Attachment is in requested state, finish waiting
 			return true, nil
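
The short-circuit works because of the three-way contract of the condition function polled under wait.ExponentialBackoff; a minimal sketch (helper names are illustrative, not from this file):

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attachmentStatus := describeCurrentStatus() // hypothetical helper
		if status == volumeAttachedStatus && alreadyAttached && attachmentStatus != status {
			// (false, err): abort the poll immediately so AttachDisk can retry
			return false, fmt.Errorf("attachment of disk failed, was %s", attachmentStatus)
		}
		if attachmentStatus == status {
			return true, nil // (true, nil): desired state reached, stop polling
		}
		return false, nil // (false, nil): not there yet, try again after backoff
	})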
@@ -2373,7 +2387,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName)
 		klog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse)
 	}
 
-	attachment, err := disk.waitForAttachmentStatus("attached", awsInstance.awsID, ec2Device)
+	attachment, err := disk.waitForAttachmentStatus("attached", awsInstance.awsID, ec2Device, alreadyAttached)
 
 	if err != nil {
 		if err == wait.ErrWaitTimeout {
@@ -2451,7 +2465,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName)
 		return "", errors.New("no response from DetachVolume")
 	}
 
-	attachment, err := diskInfo.disk.waitForAttachmentStatus("detached", awsInstance.awsID, "")
+	attachment, err := diskInfo.disk.waitForAttachmentStatus("detached", awsInstance.awsID, "", false)
 	if err != nil {
 		return "", err
 	}
@@ -3603,6 +3617,27 @@ func buildListener(port v1.ServicePort, annotations map[string]string, sslPorts
 	return listener, nil
 }
 
+func (c *Cloud) getSubnetCidrs(subnetIDs []string) ([]string, error) {
+	request := &ec2.DescribeSubnetsInput{}
+	for _, subnetID := range subnetIDs {
+		request.SubnetIds = append(request.SubnetIds, aws.String(subnetID))
+	}
+
+	subnets, err := c.ec2.DescribeSubnets(request)
+	if err != nil {
+		return nil, fmt.Errorf("error querying Subnet for ELB: %q", err)
+	}
+	if len(subnets) != len(subnetIDs) {
+		return nil, fmt.Errorf("error querying Subnet for ELB, got %d subnets for %v", len(subnets), subnetIDs)
+	}
+
+	cidrs := make([]string, 0, len(subnets))
+	for _, subnet := range subnets {
+		cidrs = append(cidrs, aws.StringValue(subnet.CidrBlock))
+	}
+	return cidrs, nil
+}
+
 // EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer
 func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
 	annotations := apiService.Annotations
@@ -3730,6 +3765,12 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS
 			return nil, err
 		}
 
+		subnetCidrs, err := c.getSubnetCidrs(subnetIDs)
+		if err != nil {
+			klog.Errorf("Error getting subnet cidrs: %q", err)
+			return nil, err
+		}
+
 		sourceRangeCidrs := []string{}
 		for cidr := range sourceRanges {
 			sourceRangeCidrs = append(sourceRangeCidrs, cidr)
@@ -3738,7 +3779,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS
 			sourceRangeCidrs = append(sourceRangeCidrs, "0.0.0.0/0")
 		}
 
-		err = c.updateInstanceSecurityGroupsForNLB(loadBalancerName, instances, sourceRangeCidrs, v2Mappings)
+		err = c.updateInstanceSecurityGroupsForNLB(loadBalancerName, instances, subnetCidrs, sourceRangeCidrs, v2Mappings)
 		if err != nil {
 			klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err)
 			return nil, err
@@ -4317,7 +4358,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin
 			}
 		}
 
-		return c.updateInstanceSecurityGroupsForNLB(loadBalancerName, nil, nil, nil)
+		return c.updateInstanceSecurityGroupsForNLB(loadBalancerName, nil, nil, nil, nil)
 	}
 
 	lb, err := c.describeLoadBalancer(loadBalancerName)
@@ -4719,7 +4760,7 @@ func setNodeDisk(
 }
 
 func getInitialAttachDetachDelay(status string) time.Duration {
-	if status == "detached" {
+	if status == volumeDetachedStatus {
 		return volumeDetachmentStatusInitialDelay
 	}
 	return volumeAttachmentStatusInitialDelay


@@ -706,27 +706,9 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty
 	return targetGroup, nil
 }
 
-func (c *Cloud) getVpcCidrBlocks() ([]string, error) {
-	vpcs, err := c.ec2.DescribeVpcs(&ec2.DescribeVpcsInput{
-		VpcIds: []*string{aws.String(c.vpcID)},
-	})
-	if err != nil {
-		return nil, fmt.Errorf("error querying VPC for ELB: %q", err)
-	}
-	if len(vpcs.Vpcs) != 1 {
-		return nil, fmt.Errorf("error querying VPC for ELB, got %d vpcs for %s", len(vpcs.Vpcs), c.vpcID)
-	}
-	cidrBlocks := make([]string, 0, len(vpcs.Vpcs[0].CidrBlockAssociationSet))
-	for _, cidr := range vpcs.Vpcs[0].CidrBlockAssociationSet {
-		cidrBlocks = append(cidrBlocks, aws.StringValue(cidr.CidrBlock))
-	}
-	return cidrBlocks, nil
-}
-
 // updateInstanceSecurityGroupsForNLB will adjust securityGroup's settings to allow inbound traffic into instances from clientCIDRs and portMappings.
 // TIP: if either instances or clientCIDRs or portMappings are nil, then the securityGroup rules for lbName are cleared.
-func (c *Cloud) updateInstanceSecurityGroupsForNLB(lbName string, instances map[InstanceID]*ec2.Instance, clientCIDRs []string, portMappings []nlbPortMapping) error {
+func (c *Cloud) updateInstanceSecurityGroupsForNLB(lbName string, instances map[InstanceID]*ec2.Instance, subnetCIDRs []string, clientCIDRs []string, portMappings []nlbPortMapping) error {
 	if c.cfg.Global.DisableSecurityGroupIngress {
 		return nil
 	}
@@ -770,14 +752,10 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLB(lbName string, instances map[
 	}
 
 	clientRuleAnnotation := fmt.Sprintf("%s=%s", NLBClientRuleDescription, lbName)
 	healthRuleAnnotation := fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, lbName)
-	vpcCIDRs, err := c.getVpcCidrBlocks()
-	if err != nil {
-		return err
-	}
 	for sgID, sg := range clusterSGs {
 		sgPerms := NewIPPermissionSet(sg.IpPermissions...).Ungroup()
 		if desiredSGIDs.Has(sgID) {
-			if err := c.updateInstanceSecurityGroupForNLBTraffic(sgID, sgPerms, healthRuleAnnotation, "tcp", healthCheckPorts, vpcCIDRs); err != nil {
+			if err := c.updateInstanceSecurityGroupForNLBTraffic(sgID, sgPerms, healthRuleAnnotation, "tcp", healthCheckPorts, subnetCIDRs); err != nil {
 				return err
 			}
 			if err := c.updateInstanceSecurityGroupForNLBTraffic(sgID, sgPerms, clientRuleAnnotation, "tcp", clientPorts, clientCIDRs); err != nil {
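
Together with the getSubnetCidrs helper added in the previous file, this narrows the NLB health-check ingress rules from every CIDR block on the VPC to just the load balancer's subnet CIDRs. Call shape after the change (argument values illustrative):

	// subnetCIDRs scopes the health-check rule; clientCIDRs still scopes client traffic.
	err := c.updateInstanceSecurityGroupsForNLB(lbName, instances, subnetCIDRs, clientCIDRs, portMappings)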


@@ -384,7 +384,7 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro
 		// No credentials provided, useInstanceMetadata should be enabled for Kubelet.
 		// TODO(feiskyer): print different error message for Kubelet and controller-manager, as they're
 		// requiring different credential settings.
-		if !config.UseInstanceMetadata && az.Config.CloudConfigType == cloudConfigTypeFile {
+		if !config.UseInstanceMetadata && config.CloudConfigType == cloudConfigTypeFile {
 			return fmt.Errorf("useInstanceMetadata must be enabled without Azure credentials")
 		}


@@ -1358,6 +1358,9 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
 	for vmssName := range vmssNamesMap {
 		vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault)
+		if err != nil {
+			return err
+		}
 
 		// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
 		// Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
@@ -1365,10 +1368,6 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
 			klog.V(3).Infof("ensureVMSSInPool: found vmss %s being deleted, skipping", vmssName)
 			continue
 		}
-		if err != nil {
-			return err
-		}
-
 		if vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil {
 			klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vmss %s", vmssName)
 			continue

vendor/modules.txt

@@ -423,7 +423,7 @@ github.com/emicklei/go-restful
 github.com/emicklei/go-restful/log
 # github.com/euank/go-kmsg-parser v2.0.0+incompatible
 github.com/euank/go-kmsg-parser/kmsgparser
-# github.com/evanphx/json-patch v4.5.0+incompatible
+# github.com/evanphx/json-patch v4.9.0+incompatible
 github.com/evanphx/json-patch
 # github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d
 github.com/exponent-io/jsonpath
@@ -1140,7 +1140,7 @@ gopkg.in/square/go-jose.v2/jwt
 gopkg.in/warnings.v0
 # gopkg.in/yaml.v2 v2.2.8
 gopkg.in/yaml.v2
-# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.8-k3s1
+# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.9-k3s1
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
 k8s.io/api/admissionregistration/v1
@@ -1184,7 +1184,7 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.8-k3s1
+# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.9-k3s1
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@@ -1224,7 +1224,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
 k8s.io/apiextensions-apiserver/pkg/registry/customresource
 k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
 k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
-# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.8-k3s1
+# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.9-k3s1
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta
@@ -1286,7 +1286,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.8-k3s1
+# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.9-k3s1
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
 k8s.io/apiserver/pkg/admission/initializer
@@ -1416,7 +1416,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
 k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.8-k3s1
+# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.9-k3s1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/kustomize
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@@ -1429,7 +1429,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.8-k3s1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.9-k3s1
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/cached
 k8s.io/client-go/discovery/cached/disk
@@ -1663,7 +1663,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.8-k3s1
+# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.9-k3s1
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
 k8s.io/cloud-provider/node/helpers
@@ -1671,13 +1671,13 @@ k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.8-k3s1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.9-k3s1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.8-k3s1
+# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.9-k3s1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -1692,7 +1692,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.8-k3s1
+# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.9-k3s1
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
 k8s.io/component-base/codec
@@ -1710,10 +1710,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
 k8s.io/component-base/metrics/testutil
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.8-k3s1
+# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.9-k3s1
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.8-k3s1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.9-k3s1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@@ -1728,7 +1728,7 @@ k8s.io/gengo/types
 k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.8-k3s1
+# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.9-k3s1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -1756,7 +1756,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.8-k3s1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.9-k3s1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
 k8s.io/kube-openapi/pkg/aggregator
@@ -1767,14 +1767,14 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.8-k3s1
+# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.9-k3s1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.8-k3s1
+# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.9-k3s1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1alpha1
 k8s.io/kube-scheduler/config/v1alpha2
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.8-k3s1
+# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.9-k3s1
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
 k8s.io/kubectl/pkg/cmd/annotate
@@ -1849,11 +1849,11 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.8-k3s1
+# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.9-k3s1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
 k8s.io/kubelet/pkg/apis/pluginregistration/v1
-# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.8-k3s1
+# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.9-k3s1
 k8s.io/kubernetes/cmd/cloud-controller-manager/app
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@@ -2597,7 +2597,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
 k8s.io/kubernetes/third_party/forked/ipvs
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.8-k3s1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.9-k3s1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -2628,7 +2628,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.8-k3s1
+# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.9-k3s1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2