Update Kubernetes to v1.21.1-k3s1

pull/3328/head
Erik Wilson 2021-05-14 10:12:55 -07:00
parent 6ee28214fa
commit 70430b53a8
No known key found for this signature in database
GPG Key ID: 28E43BB8BE202CF8
110 changed files with 2119 additions and 2056 deletions

go.mod (74 changes)

@@ -33,34 +33,34 @@ replace (
 google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884
 google.golang.org/grpc => google.golang.org/grpc v1.27.1
 gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
-k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.0-k3s1
+k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1
-k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.0-k3s1
+k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1
-k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.0-k3s1
+k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1
-k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.0-k3s1
+k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1
-k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.0-k3s1
+k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1
-k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.0-k3s1
+k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1
-k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.0-k3s1
+k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1
-k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.0-k3s1
+k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1
-k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.0-k3s1
+k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1
-k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.0-k3s1
+k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1
-k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.0-k3s1
+k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1
-k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.0-k3s1
+k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1
-k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.0-k3s1
+k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1
-k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.0-k3s1
+k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1
-k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.0-k3s1
+k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1
-k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.0-k3s1
+k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1
-k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.0-k3s1
+k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1
-k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.0-k3s1
+k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1
-k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.0-k3s1
+k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1
-k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.0-k3s1
+k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1
-k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.21.0-k3s1
+k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.21.1-k3s1
-k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.0-k3s1
+k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1
-k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.0-k3s1
+k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1
-k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.0-k3s1
+k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1
-k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.21.0-k3s1
+k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.21.1-k3s1
-k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.0-k3s1
+k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.1-k3s1
-k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.21.0-k3s1
+k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.21.1-k3s1
-k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.21.0-k3s1
+k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.21.1-k3s1
 mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7
 )
@@ -114,17 +114,17 @@ require (
 golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073
 google.golang.org/grpc v1.37.0
 gopkg.in/yaml.v2 v2.4.0
-k8s.io/api v0.21.0
+k8s.io/api v0.21.1
-k8s.io/apimachinery v0.21.0
+k8s.io/apimachinery v0.21.1
-k8s.io/apiserver v0.21.0
+k8s.io/apiserver v0.21.1
 k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
-k8s.io/cloud-provider v0.21.0
+k8s.io/cloud-provider v0.21.1
-k8s.io/component-base v0.21.0
+k8s.io/component-base v0.21.1
-k8s.io/controller-manager v0.21.0
+k8s.io/controller-manager v0.21.1
-k8s.io/cri-api v0.21.0
+k8s.io/cri-api v0.21.1
 k8s.io/klog v1.0.0
-k8s.io/kubectl v0.21.0
+k8s.io/kubectl v0.21.1
-k8s.io/kubernetes v1.21.0
+k8s.io/kubernetes v1.21.1
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920
 sigs.k8s.io/yaml v1.2.0
 )

go.sum (112 changes)

@@ -544,55 +544,55 @@ github.com/k3s-io/helm-controller v0.9.1 h1:qtHWTNHiuCPRbA2YZ7z7jTgSHo7Yc5He52oM
 github.com/k3s-io/helm-controller v0.9.1/go.mod h1:nZP8FH3KZrNNUf5r+SwwiMR63HS6lxdHdpHijgPfF74=
 github.com/k3s-io/kine v0.6.0 h1:4l7wjgCxb2oD+7Hyf3xIhkGd/6s1sXpRFdQiyy+7Ki8=
 github.com/k3s-io/kine v0.6.0/go.mod h1:rzCs93+rQHZGOiewMd84PDrER92QeZ6eeHbWkfEy4+w=
-github.com/k3s-io/kubernetes v1.21.0-k3s1 h1:pIE2vo7wrwC7nuuislJ9PRWNL/48/Xka2z9MHcdzR7M=
+github.com/k3s-io/kubernetes v1.21.1-k3s1 h1:X8nEv12/bI3iR2+ARLuzvosPW8iMOisMlklOAeovISw=
-github.com/k3s-io/kubernetes v1.21.0-k3s1/go.mod h1:Yx6XZ8zalyqEk7but+j4+5SvLzdyH1eeqZ4cwO+5dD4=
+github.com/k3s-io/kubernetes v1.21.1-k3s1/go.mod h1:ef++isEL1PW0taH6z7DXrSztPglrZ7jQhyvcMEtm0gQ=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.0-k3s1 h1:gr4+YinsNPHpl+lNSeJ+56bwsba40oTqUtlnhi/uPyc=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1 h1:7iwn62FGlOqG9oRHwTY/+AbFlSZffWMqx6WUXjRpQPk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.0-k3s1/go.mod h1:DKjoC7WTLvupppdmb5jEvRDPQENLZqz/stEUs19TOOc=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1/go.mod h1:DKjoC7WTLvupppdmb5jEvRDPQENLZqz/stEUs19TOOc=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.0-k3s1 h1:recjrG98d6Zeeol0MVwWXOOiUwM5x+gWLlB19utkOy8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1 h1:xx/SYlAfrliD+w2REUCBMFtQvATof0YitCHpswviL8s=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.0-k3s1/go.mod h1:z4ndT0jW6BSHky3MjKfpX8hfxFiOobduUEPsG67DW+o=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1/go.mod h1:z4ndT0jW6BSHky3MjKfpX8hfxFiOobduUEPsG67DW+o=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.0-k3s1 h1:zZnKRsGPl/roeHzi8w5HuGoCHtMS0Nc5mFayz585ddQ=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1 h1:Ehl7DzKcuQqcT1lqNgx8+E2WFfvcnxyFTR408v6/zcE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.0-k3s1/go.mod h1:nNWKgJz7U8Te/QwaBeDSCEsvmYwqE+lIJtSthnQTluo=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1/go.mod h1:nNWKgJz7U8Te/QwaBeDSCEsvmYwqE+lIJtSthnQTluo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.0-k3s1 h1:27UN6IaqFHTkJNMI6fl1joDg1oA0i/EsfLtmGWlzVuk=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1 h1:fHAKfXieH13JHdEIstbWmDsy32d7Lr8jmxZgbMy4D4M=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.0-k3s1/go.mod h1:55vGov1alDiAFatappea/yL2CFIuYfSpQmw8xdZNCxw=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1/go.mod h1:55vGov1alDiAFatappea/yL2CFIuYfSpQmw8xdZNCxw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.0-k3s1 h1:gdS7EpWsxrR//ZjB5NbesXB637Mn7jXbtsDRXnEMPbw=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1 h1:iFah9rWBxlUgEE1nrmpOhrMI+vDArwTR6yfL3lS75PU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.0-k3s1/go.mod h1:6CFN1avCVGMDewUGwb1yatAhmu/3XgfOI9wHSPCbOe0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1/go.mod h1:4vy7xXlS9QNceWCoFBkdxsp50ToEpoM5TjtRKTRxyF0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.0-k3s1 h1:iH45PtffFv8oiIqwv9gl5MMxfCtIbJ4tc/91kGTd9F4=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1 h1:LiLc5dwo9eWPCyro6s31zKB/m2mWX8QSU7u+UpIu3O0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.0-k3s1/go.mod h1:dQ/jkraN2wXzFTYGG3wlzs/YappHsx2F/PxtQ9KyDZY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1/go.mod h1:dQ/jkraN2wXzFTYGG3wlzs/YappHsx2F/PxtQ9KyDZY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.0-k3s1 h1:rZ699kcSU8aIlHVxaeAVAXIKFXGbNwRrcAR0D2/Cv/8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1 h1:AnNUwMVdRRjOqOmNbN05yxoTNH6Gp4YZsDLDqSSXrpo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.0-k3s1/go.mod h1:7Q3crE6C30z+md2412q3PcSR2P2NIv1WxHH5ug8LVmY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1/go.mod h1:7Q3crE6C30z+md2412q3PcSR2P2NIv1WxHH5ug8LVmY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.0-k3s1 h1:qOXwlh00n2NHTLNOgbd914dIU8csO9br051KpVi/Aw0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1 h1:S961JPII6QbT1j0wAFnRv2EKeRiDZYsCSMkAPReZ4MY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.0-k3s1/go.mod h1:yvPZprzrt0uOuPx/Tkg3zCSdulxPWxWU2nznGYKmMVk=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1/go.mod h1:yvPZprzrt0uOuPx/Tkg3zCSdulxPWxWU2nznGYKmMVk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.0-k3s1 h1:HCsle/uaJCc6J+e19y275AJbZDfMUP4l2QFpTg+EWVA=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1 h1:6zVRXrGwZu2G5IN3jjftM9R5tJ++2ou4rWBlkcK+ivo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.0-k3s1/go.mod h1:zeT/sAA82/kLPoYfV1Xc1x0PjZlRvqCsrgRZj0Q/OTc=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1/go.mod h1:zeT/sAA82/kLPoYfV1Xc1x0PjZlRvqCsrgRZj0Q/OTc=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.0-k3s1 h1:/EecAJfCzMKpL0Cc046cKStx2lVVh1Zp3LhguH9BD0g=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1 h1:97KjbtEom5IvBTbGktGOEFogPx+RruR2LJ61Oq7HpMI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.0-k3s1/go.mod h1:+aA0FBzPTZJn0I03PDY6xs4stHFP1CGYPNmcAkgDvX4=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1/go.mod h1:+aA0FBzPTZJn0I03PDY6xs4stHFP1CGYPNmcAkgDvX4=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.0-k3s1 h1:E3BoGGwpZ5sfsaW4G7m3rdJ9r/iSb68TVk/RBwTcgzY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1 h1:EZV6LHZ1Xy96bGhpdonZWMDqJ3vsJO+G4og3Y2bhwFw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.0-k3s1/go.mod h1:W/TBpRMx7t6+ZimgebLr7be4Pfb7JvWpeer+kPlA6eI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1/go.mod h1:W/TBpRMx7t6+ZimgebLr7be4Pfb7JvWpeer+kPlA6eI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.0-k3s1 h1:ZRYPnhI/jjz2c+KlnbgB4YxEdTTpE/yYsCAwfPncpGA=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1 h1:uG7fW+0DgGvKvS+QHlsUUCiplS7TpkKApxqa8kMgQZc=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.0-k3s1/go.mod h1:SnaQa8bOBayBORIZwicYBm9QrlwUPi2PKlMQOhI6HAU=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1/go.mod h1:SnaQa8bOBayBORIZwicYBm9QrlwUPi2PKlMQOhI6HAU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.0-k3s1 h1:71qmZjf0zI6uCNUi4F3ng16TX3KaUB0uxUing4+ZCUI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1 h1:A9dgWvKNTG99Dv+wk+9kMYiRR3n8r/O8YbRowznaqvE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.0-k3s1/go.mod h1:9zdUgM3lMAO5+g35Vyq3OQdMrylvtV97G7t5emDYCOs=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1/go.mod h1:9zdUgM3lMAO5+g35Vyq3OQdMrylvtV97G7t5emDYCOs=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.0-k3s1 h1:vATek1lQCKeeroQ4Xj23GQ5GRo2cEhzS53Y/5sNGXIA=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1 h1:ZA9oCLCAQ+QsT4CBsfmoAZC8OpvX3aREqgrmcHdsYDU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.0-k3s1/go.mod h1:NmtKDopOnphD2IlcH9OjoxoI4mEkkgGhVw7dTbdBTY0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1/go.mod h1:NmtKDopOnphD2IlcH9OjoxoI4mEkkgGhVw7dTbdBTY0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.0-k3s1 h1:aEZElY9vBuOUs4taV1G7D6DhVXNOWXWkoNziNNaYj5M=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1 h1:Jq6Ix8pq5N6YXhFk9dRsogpr824gsow/31vIia1FLLU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.0-k3s1/go.mod h1:KhCAvsyM1iGJLSql7cTjBXwRzigyfTwRcu1unidRJmo=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1/go.mod h1:KhCAvsyM1iGJLSql7cTjBXwRzigyfTwRcu1unidRJmo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.0-k3s1 h1:V7tCsfGezSEV8Xi3Mmr35jbJpHyav4MlMeMdld6+2Cs=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1 h1:7Dls9SYhW2caBAgvO/d+bDsnbnWn5oryOCldyTi/48g=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.0-k3s1/go.mod h1:46iKO45TZat/zvPyqe8TjLLrTS/U/nGB92Ft63PEPF0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1/go.mod h1:46iKO45TZat/zvPyqe8TjLLrTS/U/nGB92Ft63PEPF0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.0-k3s1 h1:yhPSspXH46Xd4BW1Fv6tKZzFU0/YjcXqguyDB53z4bQ=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1 h1:1AzewTobcKns6rupjf6ZcNduUbA2saJbRdlj0Xk6A8M=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.0-k3s1/go.mod h1:6mEp02ABsuOeeBuUrrol78v9LYysX7Z8CZOMFlkPOOI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1/go.mod h1:6mEp02ABsuOeeBuUrrol78v9LYysX7Z8CZOMFlkPOOI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.0-k3s1 h1:ID0QGKO7RcoE34SGa90QlhBgpy+yWGWRdYeHlN9QVNo=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1 h1:orhYkSYqltNEiiwzBZHbi5eiis5NGfAWUHKg/0XBVdk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.0-k3s1/go.mod h1:8YcoZs+nmcq5YEBZXOkUtuLpYpPoZWC7I3CAUOvZGNc=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1/go.mod h1:8YcoZs+nmcq5YEBZXOkUtuLpYpPoZWC7I3CAUOvZGNc=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.0-k3s1 h1:x7Ow0G/StGDmiyLkAA0BSs97L4BykeBgH4oKyIkRlig=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1 h1:8vP+SyaEgSZjGKLYhwp8//9XwpYYhtLjL2+eLBKs+No=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.0-k3s1/go.mod h1:vNK7RS+0XvfP7/64ypg05JWArnA0ZAxVmkz0QhoHjYQ=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1/go.mod h1:ZRzv7towE/rnhoSlglnClSdwvG956r8BtWsV8GriLCw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.0-k3s1 h1:tizT7lugmoQWSXTOCgU2yXa4EvndhMixce3tEDv9BV8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1 h1:xxDhyVqo40oFL+QRnE7Aq3f4YOmp39hsWs3CG3h/BJA=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.0-k3s1/go.mod h1:o8/6oYd5NojfXqZcgzwRY6/N9H0txmvDbs4Sk6Laz0A=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1/go.mod h1:o8/6oYd5NojfXqZcgzwRY6/N9H0txmvDbs4Sk6Laz0A=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.0-k3s1 h1:XkGtREnW8hGVt81eSI1HuA5mBjP7G4C9kWrO+Vj+z2w=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1 h1:VNow0MZPpjsdJ4VTVAMvguvUtEM9VRBzcZCepwpqLEQ=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.0-k3s1/go.mod h1:Fqw7RNHll7guUATPVDLSfOKr2ayRlFGMI3SMrxo5ECU=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1/go.mod h1:Fqw7RNHll7guUATPVDLSfOKr2ayRlFGMI3SMrxo5ECU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.0-k3s1 h1:vE5mDyQhtFYNtYOP1jr8QqPBvsbPYRun6gUC9DKoJyg=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1 h1:XkxQNzMRtm5HG8N3m6P6NVcEiTOae6zyQ1T7aZ1m3aM=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.0-k3s1/go.mod h1:/SknInvlq+Fm+vrO/Z7JYHjxwIsxAl32mAI9tUH/lGY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1/go.mod h1:/SknInvlq+Fm+vrO/Z7JYHjxwIsxAl32mAI9tUH/lGY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.0-k3s1 h1:sAqdI/DP8+Vhi3dHiuQV+7gzt0dF9umR5AnZd+K3dpo=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1 h1:KxvblpY/4qohv46MZBJtWxwY8u9YSqQnNXinl1d7DGM=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.0-k3s1/go.mod h1:99KFJSKqMgMvpCWJr4w6ooLZgR+2usWp5GPaILFNq9k=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1/go.mod h1:99KFJSKqMgMvpCWJr4w6ooLZgR+2usWp5GPaILFNq9k=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.0-k3s1/go.mod h1:WkDfrpYVRWS0Muw8Vg5XicfVdTxnLvgiT8tX8DSD0Zo=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.1-k3s1/go.mod h1:WkDfrpYVRWS0Muw8Vg5XicfVdTxnLvgiT8tX8DSD0Zo=
 github.com/k3s-io/protobuf v1.4.3-k3s1 h1:gduXrSm/6KkbTuctP6bASYqKQ/tyC/PNYqxBmJnk4Tc=
 github.com/k3s-io/protobuf v1.4.3-k3s1/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
@@ -1258,13 +1258,13 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/kustomize/api v0.8.5 h1:bfCXGXDAbFbb/Jv5AhMj2BB8a5VAJuuQ5/KU69WtDjQ=
+sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE=
-sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY=
+sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY=
-sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0=
+sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0=
-sigs.k8s.io/kustomize/kustomize/v4 v4.0.5 h1:0xQWp03aKWilF6UJrupcA2rCoCn3jejkJ+m/CCI/Fis=
+sigs.k8s.io/kustomize/kustomize/v4 v4.1.2 h1:iP3ckqMIftwsIKnMqtztReSkkPJvhqNc5QiOpMoFpbY=
-sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk=
+sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo=
-sigs.k8s.io/kustomize/kyaml v0.10.15 h1:dSLgG78KyaxN4HylPXdK+7zB3k7sW6q3IcCmcfKA+aI=
+sigs.k8s.io/kustomize/kyaml v0.10.17 h1:4zrV0ym5AYa0e512q7K3Wp1u7mzoWW0xR3UHJcGWGIg=
-sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg=
+sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg=
 sigs.k8s.io/structured-merge-diff v0.0.0-20190426204423-ea680f03cc65 h1:xJNnO2qzHtgVCSPoGkkltSpyEX7D7IJw1TmbE3G/7lY=
 sigs.k8s.io/structured-merge-diff v0.0.0-20190426204423-ea680f03cc65/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=

View File

@@ -148,7 +148,7 @@ const (
 PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost"
 // AnnotationTopologyAwareHints can be used to enable or disable Topology
-// Aware Hints for a Service. This may be set to "auto" or "disabled". Any
-// other value is treated as "disabled".
+// Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any
+// other value is treated as "Disabled".
 AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints"
 )

View File

@@ -166,7 +166,7 @@ func (m *Quantity) Unmarshal(data []byte) error {
 if err != nil {
 return err
 }
-if skippy < 0 {
+if (skippy < 0) || (iNdEx+skippy) < 0 {
 return ErrInvalidLengthGenerated
 }
 if (iNdEx + skippy) > l {
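A brief aside (not part of the commit): the extra (iNdEx+skippy) < 0 clause guards against signed-integer overflow. Go wraps signed addition, so a sufficiently large, attacker-influenced skip length can push the running index negative even though the length itself is positive. A minimal sketch, with variable names borrowed from the generated code but values invented:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Stand-ins for the unmarshal cursors; the values only demonstrate the wraparound.
	var iNdEx, skippy int64 = 10, math.MaxInt64
	fmt.Println(skippy < 0)       // false: the old check alone is satisfied
	fmt.Println(iNdEx+skippy < 0) // true: the sum wraps negative, which the new check rejects
}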

View File

@@ -78,6 +78,8 @@ type Connection interface {
 // SetIdleTimeout sets the amount of time the connection may remain idle before
 // it is automatically closed.
 SetIdleTimeout(timeout time.Duration)
+// RemoveStreams can be used to remove a set of streams from the Connection.
+RemoveStreams(streams ...Stream)
 }
 // Stream represents a bidirectional communications channel that is part of an

View File

@@ -31,7 +31,7 @@ import (
 // streams.
 type connection struct {
 conn *spdystream.Connection
-streams []httpstream.Stream
+streams map[uint32]httpstream.Stream
 streamLock sync.Mutex
 newStreamHandler httpstream.NewStreamHandler
 ping func() (time.Duration, error)
@@ -85,7 +85,12 @@ func NewServerConnectionWithPings(conn net.Conn, newStreamHandler httpstream.New
 // will be invoked when the server receives a newly created stream from the
 // client.
 func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration, pingFn func() (time.Duration, error)) httpstream.Connection {
-c := &connection{conn: conn, newStreamHandler: newStreamHandler, ping: pingFn}
+c := &connection{
+conn: conn,
+newStreamHandler: newStreamHandler,
+ping: pingFn,
+streams: make(map[uint32]httpstream.Stream),
+}
 go conn.Serve(c.newSpdyStream)
 if pingPeriod > 0 && pingFn != nil {
 go c.sendPings(pingPeriod)
@@ -105,7 +110,7 @@ func (c *connection) Close() error {
 // calling Reset instead of Close ensures that all streams are fully torn down
 s.Reset()
 }
-c.streams = make([]httpstream.Stream, 0)
+c.streams = make(map[uint32]httpstream.Stream, 0)
 c.streamLock.Unlock()
 // now that all streams are fully torn down, it's safe to call close on the underlying connection,
@@ -114,6 +119,15 @@ func (c *connection) Close() error {
 return c.conn.Close()
 }
+// RemoveStreams can be used to removes a set of streams from the Connection.
+func (c *connection) RemoveStreams(streams ...httpstream.Stream) {
+c.streamLock.Lock()
+for _, stream := range streams {
+delete(c.streams, stream.Identifier())
+}
+c.streamLock.Unlock()
+}
 // CreateStream creates a new stream with the specified headers and registers
 // it with the connection.
 func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
@@ -133,7 +147,7 @@ func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error
 // it owns.
 func (c *connection) registerStream(s httpstream.Stream) {
 c.streamLock.Lock()
-c.streams = append(c.streams, s)
+c.streams[s.Identifier()] = s
 c.streamLock.Unlock()
 }
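Not part of the commit: a small usage sketch of the new RemoveStreams method, so a long-lived connection does not keep map entries for streams it has finished with. The helper name is invented; Connection, CreateStream, and RemoveStreams come from the interface shown above, and httpstream.Stream embeds io.ReadWriteCloser in this package.

package example

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
)

// runOneStream opens a stream, uses it once, then both closes it and drops it
// from the connection's bookkeeping via RemoveStreams.
func runOneStream(conn httpstream.Connection, headers http.Header) error {
	s, err := conn.CreateStream(headers)
	if err != nil {
		return err
	}
	defer conn.RemoveStreams(s) // removes the entry keyed by s.Identifier()
	defer s.Close()

	_, err = s.Write([]byte("ping"))
	return err
}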

View File

@@ -64,6 +64,7 @@ var (
 }
 SuggestedFlowSchemas = []*flowcontrol.FlowSchema{
 SuggestedFlowSchemaSystemNodes, // references "system" priority-level
+SuggestedFlowSchemaProbes, // references "exempt" priority-level
 SuggestedFlowSchemaSystemLeaderElection, // references "leader-election" priority-level
 SuggestedFlowSchemaWorkloadLeaderElection, // references "leader-election" priority-level
 SuggestedFlowSchemaKubeControllerManager, // references "workload-high" priority-level
@@ -394,6 +395,19 @@ var (
 },
 },
 )
+// the following flow schema exempts probes
+SuggestedFlowSchemaProbes = newFlowSchema(
+"probes", "exempt", 2,
+"", // distinguisherMethodType
+flowcontrol.PolicyRulesWithSubjects{
+Subjects: groups(user.AllUnauthenticated, user.AllAuthenticated),
+NonResourceRules: []flowcontrol.NonResourcePolicyRule{
+nonResourceRule(
+[]string{"get"},
+[]string{"/healthz", "/readyz", "/livez"}),
+},
+},
+)
 )
 func newPriorityLevelConfiguration(name string, spec flowcontrol.PriorityLevelConfigurationSpec) *flowcontrol.PriorityLevelConfiguration {

View File

@@ -44,6 +44,9 @@ type DelegatingAuthenticatorConfig struct {
 // TokenAccessReviewClient is a client to do token review. It can be nil. Then every token is ignored.
 TokenAccessReviewClient authenticationclient.TokenReviewInterface
+// TokenAccessReviewTimeout specifies a time limit for requests made by the authorization webhook client.
+TokenAccessReviewTimeout time.Duration
 // WebhookRetryBackoff specifies the backoff parameters for the authentication webhook retry logic.
 // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed
 // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded.
@@ -88,7 +91,7 @@ func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.Secur
 if c.WebhookRetryBackoff == nil {
 return nil, nil, errors.New("retry backoff parameters for delegating authentication webhook has not been specified")
 }
-tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.APIAudiences, *c.WebhookRetryBackoff)
+tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.APIAudiences, *c.WebhookRetryBackoff, c.TokenAccessReviewTimeout)
 if err != nil {
 return nil, nil, err
 }

View File

@@ -195,9 +195,9 @@ type DelegatingAuthenticationOptions struct {
 // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded.
 WebhookRetryBackoff *wait.Backoff
-// ClientTimeout specifies a time limit for requests made by the authorization webhook client.
+// TokenRequestTimeout specifies a time limit for requests made by the authorization webhook client.
 // The default value is set to 10 seconds.
-ClientTimeout time.Duration
+TokenRequestTimeout time.Duration
 }
 func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions {
@@ -211,7 +211,7 @@ func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions {
 ExtraHeaderPrefixes: []string{"x-remote-extra-"},
 },
 WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(),
-ClientTimeout: 10 * time.Second,
+TokenRequestTimeout: 10 * time.Second,
 }
 }
@@ -220,9 +220,9 @@ func (s *DelegatingAuthenticationOptions) WithCustomRetryBackoff(backoff wait.Ba
 s.WebhookRetryBackoff = &backoff
 }
-// WithClientTimeout sets the given timeout for the authentication webhook client.
-func (s *DelegatingAuthenticationOptions) WithClientTimeout(timeout time.Duration) {
-s.ClientTimeout = timeout
+// WithRequestTimeout sets the given timeout for requests made by the authentication webhook client.
+func (s *DelegatingAuthenticationOptions) WithRequestTimeout(timeout time.Duration) {
+s.TokenRequestTimeout = timeout
 }
 func (s *DelegatingAuthenticationOptions) Validate() []error {
@@ -274,9 +274,10 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(authenticationInfo *server.Aut
 }
 cfg := authenticatorfactory.DelegatingAuthenticatorConfig{
 Anonymous: true,
 CacheTTL: s.CacheTTL,
 WebhookRetryBackoff: s.WebhookRetryBackoff,
+TokenAccessReviewTimeout: s.TokenRequestTimeout,
 }
 client, err := s.getClient()
@@ -419,7 +420,10 @@ func (s *DelegatingAuthenticationOptions) getClient() (kubernetes.Interface, err
 // set high qps/burst limits since this will effectively limit API server responsiveness
 clientConfig.QPS = 200
 clientConfig.Burst = 400
-clientConfig.Timeout = s.ClientTimeout
+// do not set a timeout on the http client, instead use context for cancellation
+// if multiple timeouts were set, the request will pick the smaller timeout to be applied, leaving other useless.
+//
+// see https://github.com/golang/go/blob/a937729c2c2f6950a32bc5cd0f5b88700882f078/src/net/http/client.go#L364
 return kubernetes.NewForConfig(clientConfig)
 }
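A standalone illustration (not from the commit) of the comment above: when both http.Client.Timeout and a request context deadline are set, whichever fires first cancels the request, so leaving the client timeout unset keeps the caller's context in control. The test server and durations are invented for the demo.

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	// Deliberately slow test endpoint.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(500 * time.Millisecond)
	}))
	defer srv.Close()

	// Client-level timeout of 50ms and a much longer per-request context deadline.
	client := &http.Client{Timeout: 50 * time.Millisecond}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, srv.URL, nil)
	_, err := client.Do(req)
	fmt.Println(err) // times out after ~50ms: the smaller of the two limits wins
}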

View File

@@ -52,17 +52,18 @@ type tokenReviewer interface {
 }
 type WebhookTokenAuthenticator struct {
 tokenReview tokenReviewer
 retryBackoff wait.Backoff
 implicitAuds authenticator.Audiences
+requestTimeout time.Duration
 }
 // NewFromInterface creates a webhook authenticator using the given tokenReview
 // client. It is recommend to wrap this authenticator with the token cache
 // authenticator implemented in
 // k8s.io/apiserver/pkg/authentication/token/cache.
-func NewFromInterface(tokenReview authenticationv1client.TokenReviewInterface, implicitAuds authenticator.Audiences, retryBackoff wait.Backoff) (*WebhookTokenAuthenticator, error) {
+func NewFromInterface(tokenReview authenticationv1client.TokenReviewInterface, implicitAuds authenticator.Audiences, retryBackoff wait.Backoff, requestTimeout time.Duration) (*WebhookTokenAuthenticator, error) {
-return newWithBackoff(tokenReview, retryBackoff, implicitAuds)
+return newWithBackoff(tokenReview, retryBackoff, implicitAuds, requestTimeout)
 }
 // New creates a new WebhookTokenAuthenticator from the provided kubeconfig
@@ -74,12 +75,12 @@ func New(kubeConfigFile string, version string, implicitAuds authenticator.Audie
 if err != nil {
 return nil, err
 }
-return newWithBackoff(tokenReview, retryBackoff, implicitAuds)
+return newWithBackoff(tokenReview, retryBackoff, implicitAuds, time.Duration(0))
 }
 // newWithBackoff allows tests to skip the sleep.
-func newWithBackoff(tokenReview tokenReviewer, retryBackoff wait.Backoff, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) {
-return &WebhookTokenAuthenticator{tokenReview, retryBackoff, implicitAuds}, nil
+func newWithBackoff(tokenReview tokenReviewer, retryBackoff wait.Backoff, implicitAuds authenticator.Audiences, requestTimeout time.Duration) (*WebhookTokenAuthenticator, error) {
+return &WebhookTokenAuthenticator{tokenReview, retryBackoff, implicitAuds, requestTimeout}, nil
 }
 // AuthenticateToken implements the authenticator.Token interface.
@@ -105,7 +106,17 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token
 var (
 result *authenticationv1.TokenReview
 auds authenticator.Audiences
+cancel context.CancelFunc
 )
+// set a hard timeout if it was defined
+// if the child has a shorter deadline then it will expire first,
+// otherwise if the parent has a shorter deadline then the parent will expire and it will be propagate to the child
+if w.requestTimeout > 0 {
+ctx, cancel = context.WithTimeout(ctx, w.requestTimeout)
+defer cancel()
+}
 // WithExponentialBackoff will return tokenreview create error (tokenReviewErr) if any.
 if err := webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error {
 var tokenReviewErr error
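A minimal sketch (not from the commit) of the deadline behaviour the new comment describes: context.WithTimeout never extends a parent's deadline, so the shorter of the caller's deadline and the configured requestTimeout is the one that takes effect. Durations are arbitrary.

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Parent context that expires in 50ms.
	parent, cancelParent := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancelParent()

	// Child asks for 5s but cannot outlive its parent.
	child, cancelChild := context.WithTimeout(parent, 5*time.Second)
	defer cancelChild()

	deadline, _ := child.Deadline()
	fmt.Println(time.Until(deadline) < time.Second) // true: the parent's earlier deadline applies
}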

View File

@@ -3,8 +3,8 @@ package version
 var (
 gitMajor = "1"
 gitMinor = "21"
-gitVersion = "v1.21.0-k3s1"
+gitVersion = "v1.21.1-k3s1"
-gitCommit = "bcdd3feac64be5b58c8f3207f0eb6f6a02ea9f4d"
+gitCommit = "2748979665974057d48bc75c1bfe05ae5257300c"
 gitTreeState = "clean"
-buildDate = "2021-04-12T18:20:57Z"
+buildDate = "2021-05-14T01:38:27Z"
 )

View File

@@ -3,8 +3,8 @@ package version
 var (
 gitMajor = "1"
 gitMinor = "21"
-gitVersion = "v1.21.0-k3s1"
+gitVersion = "v1.21.1-k3s1"
-gitCommit = "bcdd3feac64be5b58c8f3207f0eb6f6a02ea9f4d"
+gitCommit = "2748979665974057d48bc75c1bfe05ae5257300c"
 gitTreeState = "clean"
-buildDate = "2021-04-12T18:20:57Z"
+buildDate = "2021-05-14T01:38:27Z"
 )

View File

@@ -86,7 +86,7 @@ func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeStorageClassToCSI(sc
 // TranslateInTreeInlineVolumeToCSI takes a Volume with AWSElasticBlockStore set from in-tree
 // and converts the AWSElasticBlockStore source to a CSIPersistentVolumeSource
-func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
+func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error) {
 if volume == nil || volume.AWSElasticBlockStore == nil {
 return nil, fmt.Errorf("volume is nil or AWS EBS not defined on volume")
 }

View File

@@ -93,7 +93,7 @@ func (t *azureDiskCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.St
 // TranslateInTreeInlineVolumeToCSI takes a Volume with AzureDisk set from in-tree
 // and converts the AzureDisk source to a CSIPersistentVolumeSource
-func (t *azureDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
+func (t *azureDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error) {
 if volume == nil || volume.AzureDisk == nil {
 return nil, fmt.Errorf("volume is nil or Azure Disk not defined on volume")
 }

View File

@@ -65,7 +65,7 @@ func (t *azureFileCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.St
 // TranslateInTreeInlineVolumeToCSI takes a Volume with AzureFile set from in-tree
 // and converts the AzureFile source to a CSIPersistentVolumeSource
-func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
+func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error) {
 if volume == nil || volume.AzureFile == nil {
 return nil, fmt.Errorf("volume is nil or Azure File not defined on volume")
 }
@@ -77,6 +77,11 @@ func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol
 accountName = azureSource.SecretName
 }
+secretNamespace := defaultSecretNamespace
+if podNamespace != "" {
+secretNamespace = podNamespace
+}
 var (
 pv = &v1.PersistentVolume{
 ObjectMeta: metav1.ObjectMeta{
@@ -93,7 +98,7 @@ func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol
 VolumeAttributes: map[string]string{shareNameField: azureSource.ShareName},
 NodeStageSecretRef: &v1.SecretReference{
 Name: azureSource.SecretName,
-Namespace: defaultSecretNamespace,
+Namespace: secretNamespace,
 },
 },
 },

View File

@@ -162,7 +162,7 @@ func backwardCompatibleAccessModes(ams []v1.PersistentVolumeAccessMode) []v1.Per
 // TranslateInTreeInlineVolumeToCSI takes a Volume with GCEPersistentDisk set from in-tree
 // and converts the GCEPersistentDisk source to a CSIPersistentVolumeSource
-func (g *gcePersistentDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
+func (g *gcePersistentDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error) {
 if volume == nil || volume.GCEPersistentDisk == nil {
 return nil, fmt.Errorf("volume is nil or GCE PD not defined on volume")
 }

View File

@@ -37,7 +37,8 @@ type InTreePlugin interface {
 // TranslateInTreeInlineVolumeToCSI takes a inline volume and will translate
 // the in-tree inline volume source to a CSIPersistentVolumeSource
 // A PV object containing the CSIPersistentVolumeSource in it's spec is returned
-TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error)
+// podNamespace is only needed for azurefile to fetch secret namespace, no need to be set for other plugins.
+TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error)
 // TranslateInTreePVToCSI takes a persistent volume and will translate
 // the in-tree pv source to a CSI Source. The input persistent volume can be modified

View File

@@ -75,7 +75,7 @@ func (t *osCinderCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.Sto
 // TranslateInTreeInlineVolumeToCSI takes a Volume with Cinder set from in-tree
 // and converts the Cinder source to a CSIPersistentVolumeSource
-func (t *osCinderCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
+func (t *osCinderCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error) {
 if volume == nil || volume.Cinder == nil {
 return nil, fmt.Errorf("volume is nil or Cinder not defined on volume")
 }

View File

@@ -111,7 +111,7 @@ func (t *vSphereCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.Stor
 // TranslateInTreeInlineVolumeToCSI takes a Volume with VsphereVolume set from in-tree
 // and converts the VsphereVolume source to a CSIPersistentVolumeSource
-func (t *vSphereCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
+func (t *vSphereCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error) {
 if volume == nil || volume.VsphereVolume == nil {
 return nil, fmt.Errorf("volume is nil or VsphereVolume not defined on volume")
 }

View File

@@ -62,13 +62,13 @@ func (CSITranslator) TranslateInTreeStorageClassToCSI(inTreePluginName string, s
 // TranslateInTreeInlineVolumeToCSI takes a inline volume and will translate
 // the in-tree volume source to a CSIPersistentVolumeSource (wrapped in a PV)
 // if the translation logic has been implemented.
-func (CSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
+func (CSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error) {
 if volume == nil {
 return nil, fmt.Errorf("persistent volume was nil")
 }
 for _, curPlugin := range inTreePlugins {
 if curPlugin.CanSupportInline(volume) {
-pv, err := curPlugin.TranslateInTreeInlineVolumeToCSI(volume)
+pv, err := curPlugin.TranslateInTreeInlineVolumeToCSI(volume, podNamespace)
 if err != nil {
 return nil, err
 }

View File

@@ -228,6 +228,12 @@ func (o *CreateIngressOptions) Validate() error {
 }
 }
+for _, annotation := range o.Annotations {
+if an := strings.SplitN(annotation, "=", 2); len(an) != 2 {
+return fmt.Errorf("annotation %s is invalid and should be in format key=[value]", annotation)
+}
+}
 if len(o.DefaultBackend) > 0 && len(strings.Split(o.DefaultBackend, ":")) != 2 {
 return fmt.Errorf("default-backend should be in format servicename:serviceport")
 }
@@ -285,8 +291,8 @@ func (o *CreateIngressOptions) createIngress() *networkingv1.Ingress {
 }
 func (o *CreateIngressOptions) buildAnnotations() map[string]string {
-var annotations map[string]string
-annotations = make(map[string]string)
+var annotations = make(map[string]string)
 for _, annotation := range o.Annotations {
 an := strings.SplitN(annotation, "=", 2)
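Not part of the diff: a tiny illustration of the key=[value] check added above. strings.SplitN with a limit of 2 yields two parts only when an '=' is present, so "foo" is rejected while "foo=" and "foo=bar" pass; the sample values are invented.

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, annotation := range []string{"foo=bar", "foo=", "foo"} {
		an := strings.SplitN(annotation, "=", 2)
		// Mirrors the validation above: exactly two parts means "key=[value]".
		fmt.Printf("%-10q valid=%v parts=%q\n", annotation, len(an) == 2, an)
	}
}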

View File

@@ -72,6 +72,7 @@ type ServiceOptions struct {
 FieldManager string
 CreateAnnotation bool
 Namespace string
+EnforceNamespace bool
 Client corev1client.CoreV1Interface
 DryRunStrategy cmdutil.DryRunStrategy
@@ -105,7 +106,7 @@ func (o *ServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []
 return err
 }
-o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
+o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
 if err != nil {
 return err
 }
@@ -173,13 +174,19 @@ func (o *ServiceOptions) createService() (*corev1.Service, error) {
 selector := map[string]string{}
 selector["app"] = o.Name
+namespace := ""
+if o.EnforceNamespace {
+namespace = o.Namespace
+}
 service := corev1.Service{
 ObjectMeta: metav1.ObjectMeta{
 Name: o.Name,
 Labels: labels,
+Namespace: namespace,
 },
 Spec: corev1.ServiceSpec{
-Type: corev1.ServiceType(o.Type),
+Type: o.Type,
 Selector: selector,
 Ports: ports,
 ExternalName: o.ExternalName,

View File

@@ -2166,7 +2166,7 @@ func describeJob(job *batchv1.Job, events *corev1.EventList) (string, error) {
 w.Write(LEVEL_0, "Completions:\t<unset>\n")
 }
 if job.Spec.CompletionMode != nil {
-w.Write(LEVEL_0, "Completion Mode:\t%s\n", job.Spec.CompletionMode)
+w.Write(LEVEL_0, "Completion Mode:\t%s\n", *job.Spec.CompletionMode)
 }
 if job.Status.StartTime != nil {
 w.Write(LEVEL_0, "Start Time:\t%s\n", job.Status.StartTime.Time.Format(time.RFC1123Z))
@@ -3055,7 +3055,7 @@ func describeEndpointSliceV1(eps *discoveryv1.EndpointSlice, events *corev1.Even
 w.Write(LEVEL_2, "NodeName:\t%s\n", nodeNameText)
 zoneText := "<unset>"
-if endpoint.NodeName != nil {
+if endpoint.Zone != nil {
 zoneText = *endpoint.Zone
 }
 w.Write(LEVEL_2, "Zone:\t%s\n", zoneText)

View File

@@ -365,6 +365,12 @@ func newProxyServer(
 }
 }
+useEndpointSlices := utilfeature.DefaultFeatureGate.Enabled(features.EndpointSliceProxying)
+if proxyMode == proxyModeUserspace {
+// userspace mode doesn't support endpointslice.
+useEndpointSlices = false
+}
 var connTracker Conntracker
 if !libcontainersystem.RunningInUserNS() {
 // if we are in userns, sysctl does not work and connTracker should be kept nil
@@ -391,7 +397,7 @@ func newProxyServer(
 OOMScoreAdj: config.OOMScoreAdj,
 ConfigSyncPeriod: config.ConfigSyncPeriod.Duration,
 HealthzServer: healthzServer,
-UseEndpointSlices: utilfeature.DefaultFeatureGate.Enabled(features.EndpointSliceProxying),
+UseEndpointSlices: useEndpointSlices,
 }, nil
 }

View File

@@ -160,7 +160,11 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi
 return nil, fmt.Errorf("unable to create proxier: %v", err)
 }
 }
+useEndpointSlices := utilfeature.DefaultFeatureGate.Enabled(features.WindowsEndpointSliceProxying)
+if proxyMode == proxyModeUserspace {
+// userspace mode doesn't support endpointslice.
+useEndpointSlices = false
+}
 return &ProxyServer{
 Client: client,
 EventClient: eventClient,
@@ -175,7 +179,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi
 OOMScoreAdj: config.OOMScoreAdj,
 ConfigSyncPeriod: config.ConfigSyncPeriod.Duration,
 HealthzServer: healthzServer,
-UseEndpointSlices: utilfeature.DefaultFeatureGate.Enabled(features.WindowsEndpointSliceProxying),
+UseEndpointSlices: useEndpointSlices,
 }, nil
 }

View File

@@ -126,7 +126,7 @@ const (
 PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost"
 // AnnotationTopologyAwareHints can be used to enable or disable Topology
-// Aware Hints for a Service. This may be set to "auto" or "disabled". Any
-// other value is treated as "disabled".
+// Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any
+// other value is treated as "Disabled".
 AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints"
 )

View File

@@ -4267,7 +4267,7 @@ func ValidateService(service *core.Service) field.ErrorList {
 allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i]))
 }
 } else {
-allErrs = append(allErrs, validateNonSpecialIP(ip, idxPath)...)
+allErrs = append(allErrs, ValidateNonSpecialIP(ip, idxPath)...)
 }
 }
@@ -5755,15 +5755,19 @@ func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path)
 allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg))
 }
 }
-allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...)
+allErrs = append(allErrs, ValidateNonSpecialIP(address.IP, fldPath.Child("ip"))...)
 return allErrs
 }
-func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
-// We disallow some IPs as endpoints or external-ips. Specifically,
-// unspecified and loopback addresses are nonsensical and link-local
-// addresses tend to be used for node-centric purposes (e.g. metadata
-// service).
+// ValidateNonSpecialIP is used to validate Endpoints, EndpointSlices, and
+// external IPs. Specifically, this disallows unspecified and loopback addresses
+// are nonsensical and link-local addresses tend to be used for node-centric
+// purposes (e.g. metadata service).
+//
+// IPv6 references
+// - https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
+// - https://www.iana.org/assignments/ipv6-multicast-addresses/ipv6-multicast-addresses.xhtml
+func ValidateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
 allErrs := field.ErrorList{}
 ip := net.ParseIP(ipAddress)
 if ip == nil {
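Not from the commit: a small standalone sketch of the kinds of addresses such a check screens out, using only the standard net package. The reject list mirrors the intent described in the comment (unspecified, loopback, link-local) rather than the exact upstream rules.

package main

import (
	"fmt"
	"net"
)

// specialIP reports whether an address would be rejected as an endpoint or
// external IP under the policy described above (illustrative only).
func specialIP(s string) bool {
	ip := net.ParseIP(s)
	if ip == nil {
		return true // not a valid IP at all
	}
	return ip.IsUnspecified() || ip.IsLoopback() ||
		ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast()
}

func main() {
	for _, s := range []string{"10.0.0.1", "127.0.0.1", "0.0.0.0", "169.254.169.254", "fe80::1", "2001:db8::1"} {
		fmt.Println(s, "special:", specialIP(s))
	}
}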

View File

@@ -100,8 +100,10 @@ func validateEndpoints(endpoints []discovery.Endpoint, addrType discovery.Addres
 switch addrType {
 case discovery.AddressTypeIPv4:
 allErrs = append(allErrs, validation.IsValidIPv4Address(addressPath.Index(i), address)...)
+allErrs = append(allErrs, apivalidation.ValidateNonSpecialIP(address, addressPath.Index(i))...)
 case discovery.AddressTypeIPv6:
 allErrs = append(allErrs, validation.IsValidIPv6Address(addressPath.Index(i), address)...)
+allErrs = append(allErrs, apivalidation.ValidateNonSpecialIP(address, addressPath.Index(i))...)
 case discovery.AddressTypeFQDN:
 allErrs = append(allErrs, validation.IsFullyQualifiedDomainName(addressPath.Index(i), address)...)
 }


@ -102,7 +102,7 @@ var (
Name: "endpointslices_changed_per_sync", Name: "endpointslices_changed_per_sync",
Help: "Number of EndpointSlices changed on each Service sync", Help: "Number of EndpointSlices changed on each Service sync",
}, },
[]string{"topology"}, // either "auto" or "disabled" []string{"topology"}, // either "Auto" or "Disabled"
) )
// EndpointSliceSyncs tracks the number of sync operations the controller // EndpointSliceSyncs tracks the number of sync operations the controller


@ -330,9 +330,9 @@ func (r *reconciler) finalize(
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc() metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
} }
topologyLabel := "disabled" topologyLabel := "Disabled"
if r.topologyCache != nil && hintsEnabled(service.Annotations) { if r.topologyCache != nil && hintsEnabled(service.Annotations) {
topologyLabel = "auto" topologyLabel = "Auto"
} }
numSlicesChanged := len(slicesToCreate) + len(slicesToUpdate) + len(slicesToDelete) numSlicesChanged := len(slicesToCreate) + len(slicesToUpdate) + len(slicesToDelete)


@ -386,11 +386,11 @@ func unchangedSlices(existingSlices, slicesToUpdate, slicesToDelete []*discovery
} }
// hintsEnabled returns true if the provided annotations include a // hintsEnabled returns true if the provided annotations include a
// corev1.AnnotationTopologyAwareHints key with a value set to "auto". // corev1.AnnotationTopologyAwareHints key with a value set to "Auto" or "auto".
func hintsEnabled(annotations map[string]string) bool { func hintsEnabled(annotations map[string]string) bool {
val, ok := annotations[corev1.AnnotationTopologyAwareHints] val, ok := annotations[corev1.AnnotationTopologyAwareHints]
if !ok { if !ok {
return false return false
} }
return val == "auto" return val == "Auto" || val == "auto"
} }
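A self-contained sketch of the accepted values after this change; the annotation key is the only assumption carried over from the constant above.

package main

import "fmt"

// hintsEnabled mirrors the check above: both the canonical "Auto" and the
// legacy lowercase "auto" enable Topology Aware Hints; anything else,
// including "Disabled" or an unset annotation, leaves them off. Illustrative
// only; the real function is unexported in the endpointslice controller.
func hintsEnabled(annotations map[string]string) bool {
    val, ok := annotations["service.kubernetes.io/topology-aware-hints"]
    if !ok {
        return false
    }
    return val == "Auto" || val == "auto"
}

func main() {
    for _, v := range []string{"Auto", "auto", "Disabled", "bogus"} {
        ann := map[string]string{"service.kubernetes.io/topology-aware-hints": v}
        fmt.Printf("%q -> %v\n", v, hintsEnabled(ann))
    }
    fmt.Println("unset ->", hintsEnabled(nil))
}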


@ -21,7 +21,7 @@ import (
"fmt" "fmt"
"strings" "strings"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -84,7 +84,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName,
err) err)
} }
volumeSpec, err = translateInTreeSpecToCSIIfNeeded(volumeSpec, nodeName, vpm, csiMigratedPluginManager, csiTranslator) volumeSpec, err = translateInTreeSpecToCSIIfNeeded(volumeSpec, nodeName, vpm, csiMigratedPluginManager, csiTranslator, pod.Namespace)
if err != nil { if err != nil {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"error performing CSI migration checks and translation for PVC %q/%q: %v", "error performing CSI migration checks and translation for PVC %q/%q: %v",
@ -109,7 +109,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName,
clonedPodVolume := podVolume.DeepCopy() clonedPodVolume := podVolume.DeepCopy()
origspec := volume.NewSpecFromVolume(clonedPodVolume) origspec := volume.NewSpecFromVolume(clonedPodVolume)
spec, err := translateInTreeSpecToCSIIfNeeded(origspec, nodeName, vpm, csiMigratedPluginManager, csiTranslator) spec, err := translateInTreeSpecToCSIIfNeeded(origspec, nodeName, vpm, csiMigratedPluginManager, csiTranslator, pod.Namespace)
if err != nil { if err != nil {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"error performing CSI migration checks and translation for inline volume %q: %v", "error performing CSI migration checks and translation for inline volume %q: %v",
@ -285,7 +285,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
return return
} }
func translateInTreeSpecToCSIIfNeeded(spec *volume.Spec, nodeName types.NodeName, vpm *volume.VolumePluginMgr, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) (*volume.Spec, error) { func translateInTreeSpecToCSIIfNeeded(spec *volume.Spec, nodeName types.NodeName, vpm *volume.VolumePluginMgr, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator, podNamespace string) (*volume.Spec, error) {
translatedSpec := spec translatedSpec := spec
migratable, err := csiMigratedPluginManager.IsMigratable(spec) migratable, err := csiMigratedPluginManager.IsMigratable(spec)
if err != nil { if err != nil {
@ -300,7 +300,7 @@ func translateInTreeSpecToCSIIfNeeded(spec *volume.Spec, nodeName types.NodeName
return nil, err return nil, err
} }
if migratable && migrationSupportedOnNode { if migratable && migrationSupportedOnNode {
translatedSpec, err = csimigration.TranslateInTreeSpecToCSI(spec, csiTranslator) translatedSpec, err = csimigration.TranslateInTreeSpecToCSI(spec, podNamespace, csiTranslator)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -17,6 +17,8 @@ limitations under the License.
package config package config
import ( import (
"time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/fields"
@ -24,13 +26,32 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
) )
// WaitForAPIServerSyncPeriod is the period between checks for the node list/watch initial sync
const WaitForAPIServerSyncPeriod = 1 * time.Second
// NewSourceApiserver creates a config source that watches and pulls from the apiserver. // NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(c clientset.Interface, nodeName types.NodeName, updates chan<- interface{}) { func NewSourceApiserver(c clientset.Interface, nodeName types.NodeName, nodeHasSynced func() bool, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector("spec.nodeName", string(nodeName))) lw := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector("spec.nodeName", string(nodeName)))
newSourceApiserverFromLW(lw, updates)
// The Reflector responsible for watching pods at the apiserver should be run only after
// the node sync with the apiserver has completed.
klog.InfoS("Waiting for node sync before watching apiserver pods")
go func() {
for {
if nodeHasSynced() {
klog.V(4).InfoS("node sync completed")
break
}
time.Sleep(WaitForAPIServerSyncPeriod)
klog.V(4).InfoS("node sync has not completed yet")
}
klog.InfoS("Watching apiserver")
newSourceApiserverFromLW(lw, updates)
}()
} }
// newSourceApiserverFromLW creates a config source that watches and pulls from the apiserver. // newSourceApiserverFromLW creates a config source that watches and pulls from the apiserver.


@ -163,6 +163,10 @@ func (h *httpStreamHandler) removeStreamPair(requestID string) {
h.streamPairsLock.Lock() h.streamPairsLock.Lock()
defer h.streamPairsLock.Unlock() defer h.streamPairsLock.Unlock()
if h.conn != nil {
pair := h.streamPairs[requestID]
h.conn.RemoveStreams(pair.dataStream, pair.errorStream)
}
delete(h.streamPairs, requestID) delete(h.streamPairs, requestID)
} }


@ -26,8 +26,10 @@ import (
dockertypes "github.com/docker/docker/api/types" dockertypes "github.com/docker/docker/api/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/tools/remotecommand"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
) )
@ -106,7 +108,7 @@ func (*NativeExecHandler) ExecInContainer(ctx context.Context, client libdocker.
ExecStarted: execStarted, ExecStarted: execStarted,
} }
if timeout > 0 { if timeout > 0 && utilfeature.DefaultFeatureGate.Enabled(features.ExecProbeTimeout) {
var cancel context.CancelFunc var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, timeout) ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel() defer cancel()
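A minimal sketch of the pattern applied here: the exec context gets a deadline only when a positive timeout is configured and the feature gate is on. The gate is modeled as a plain bool rather than the real ExecProbeTimeout gate.

package main

import (
    "context"
    "fmt"
    "time"
)

// execWithOptionalTimeout wraps the context with a deadline only when both a
// positive timeout is configured and the gate is enabled, then runs the call.
func execWithOptionalTimeout(ctx context.Context, timeout time.Duration, gateEnabled bool, run func(context.Context) error) error {
    if timeout > 0 && gateEnabled {
        var cancel context.CancelFunc
        ctx, cancel = context.WithTimeout(ctx, timeout)
        defer cancel()
    }
    return run(ctx)
}

func main() {
    err := execWithOptionalTimeout(context.Background(), 50*time.Millisecond, true, func(ctx context.Context) error {
        select {
        case <-time.After(time.Second): // simulated long-running exec
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    })
    fmt.Println("result:", err) // deadline exceeded when the gate is on
}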


@ -124,9 +124,6 @@ const (
// Max amount of time to wait for the container runtime to come up. // Max amount of time to wait for the container runtime to come up.
maxWaitForContainerRuntime = 30 * time.Second maxWaitForContainerRuntime = 30 * time.Second
// Max amount of time to wait for node list/watch to initially sync
maxWaitForAPIServerSync = 10 * time.Second
// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed. // nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
nodeStatusUpdateRetry = 5 nodeStatusUpdateRetry = 5
@ -257,7 +254,7 @@ type DockerOptions struct {
// makePodSourceConfig creates a config.PodConfig from the given // makePodSourceConfig creates a config.PodConfig from the given
// KubeletConfiguration or returns an error. // KubeletConfiguration or returns an error.
func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName) (*config.PodConfig, error) { func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, nodeHasSynced func() bool) (*config.PodConfig, error) {
manifestURLHeader := make(http.Header) manifestURLHeader := make(http.Header)
if len(kubeCfg.StaticPodURLHeader) > 0 { if len(kubeCfg.StaticPodURLHeader) > 0 {
for k, v := range kubeCfg.StaticPodURLHeader { for k, v := range kubeCfg.StaticPodURLHeader {
@ -283,8 +280,8 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
} }
if kubeDeps.KubeClient != nil { if kubeDeps.KubeClient != nil {
klog.InfoS("Watching apiserver") klog.InfoS("Adding apiserver pod source")
config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, cfg.Channel(kubetypes.ApiserverSource)) config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, nodeHasSynced, cfg.Channel(kubetypes.ApiserverSource))
} }
return cfg, nil return cfg, nil
} }
@ -390,9 +387,32 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
} }
} }
var nodeHasSynced cache.InformerSynced
var nodeLister corelisters.NodeLister
// If kubeClient == nil, we are running in standalone mode (i.e. no API servers)
// If not nil, we are running as part of a cluster and should sync w/API
if kubeDeps.KubeClient != nil {
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.Set{metav1.ObjectNameField: string(nodeName)}.String()
}))
nodeLister = kubeInformers.Core().V1().Nodes().Lister()
nodeHasSynced = func() bool {
return kubeInformers.Core().V1().Nodes().Informer().HasSynced()
}
kubeInformers.Start(wait.NeverStop)
klog.InfoS("Attempting to sync node with API server")
} else {
// we don't have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }
klog.InfoS("Kubelet is running in standalone mode, will skip API server sync")
}
if kubeDeps.PodConfig == nil { if kubeDeps.PodConfig == nil {
var err error var err error
kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName) kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName, nodeHasSynced)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -433,8 +453,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
var serviceLister corelisters.ServiceLister var serviceLister corelisters.ServiceLister
var serviceHasSynced cache.InformerSynced var serviceHasSynced cache.InformerSynced
// If kubeClient == nil, we are running in standalone mode (i.e. no API servers)
// If not nil, we are running as part of a cluster and should sync w/API
if kubeDeps.KubeClient != nil { if kubeDeps.KubeClient != nil {
kubeInformers := informers.NewSharedInformerFactory(kubeDeps.KubeClient, 0) kubeInformers := informers.NewSharedInformerFactory(kubeDeps.KubeClient, 0)
serviceLister = kubeInformers.Core().V1().Services().Lister() serviceLister = kubeInformers.Core().V1().Services().Lister()
@ -446,31 +464,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
serviceHasSynced = func() bool { return true } serviceHasSynced = func() bool { return true }
} }
var nodeHasSynced cache.InformerSynced
var nodeLister corelisters.NodeLister
if kubeDeps.KubeClient != nil {
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.Set{metav1.ObjectNameField: string(nodeName)}.String()
}))
nodeLister = kubeInformers.Core().V1().Nodes().Lister()
nodeHasSynced = func() bool {
if kubeInformers.Core().V1().Nodes().Informer().HasSynced() {
return true
}
klog.InfoS("Kubelet nodes not sync")
return false
}
kubeInformers.Start(wait.NeverStop)
klog.InfoS("Kubelet client is not nil")
} else {
// we dont have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }
klog.InfoS("Kubelet client is nil")
}
// construct a node reference used for events // construct a node reference used for events
nodeRef := &v1.ObjectReference{ nodeRef := &v1.ObjectReference{
Kind: "Node", Kind: "Node",


@ -22,7 +22,6 @@ import (
"io/ioutil" "io/ioutil"
"net" "net"
"path/filepath" "path/filepath"
"time"
cadvisorapiv1 "github.com/google/cadvisor/info/v1" cadvisorapiv1 "github.com/google/cadvisor/info/v1"
cadvisorv2 "github.com/google/cadvisor/info/v2" cadvisorv2 "github.com/google/cadvisor/info/v2"
@ -33,7 +32,6 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -237,15 +235,6 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) {
if kl.kubeClient == nil { if kl.kubeClient == nil {
return kl.initialNode(context.TODO()) return kl.initialNode(context.TODO())
} }
// if we have a valid kube client, we wait for initial lister to sync
if !kl.nodeHasSynced() {
err := wait.PollImmediate(time.Second, maxWaitForAPIServerSync, func() (bool, error) {
return kl.nodeHasSynced(), nil
})
if err != nil {
return nil, fmt.Errorf("nodes have not yet been read at least once, cannot construct node object")
}
}
return kl.nodeLister.Get(string(kl.nodeName)) return kl.nodeLister.Get(string(kl.nodeName))
} }
@ -256,7 +245,7 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) {
// zero capacity, and the default labels. // zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) { func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) {
if kl.kubeClient != nil { if kl.kubeClient != nil {
if n, err := kl.GetNode(); err == nil { if n, err := kl.nodeLister.Get(string(kl.nodeName)); err == nil {
return n, nil return n, nil
} }
} }


@ -687,7 +687,8 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, ru
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name) killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name)
if err := m.killContainer(pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride); err != nil { if err := m.killContainer(pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error()) killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
klog.ErrorS(err, "Kill container failed", "pod", klog.KObj(pod), "podUID", pod.UID, // Use runningPod for logging as the pod passed in could be *nil*.
klog.ErrorS(err, "Kill container failed", "pod", klog.KRef(runningPod.Namespace, runningPod.Name), "podUID", runningPod.ID,
"containerName", container.Name, "containerID", container.ID) "containerName", container.Name, "containerID", container.ID)
} }
containerResults <- killContainerResult containerResults <- killContainerResult


@ -250,8 +250,9 @@ func (w *worker) doProbe() (keepGoing bool) {
if c.Started != nil && *c.Started { if c.Started != nil && *c.Started {
// Stop probing for startup once container has started. // Stop probing for startup once container has started.
// we keep it running to make sure it will work for restarted containers.
if w.probeType == startup { if w.probeType == startup {
return false return true
} }
} else { } else {
// Disable other probes until container has started. // Disable other probes until container has started.


@ -553,7 +553,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
return nil, nil, "", err return nil, nil, "", err
} }
if migratable { if migratable {
volumeSpec, err = csimigration.TranslateInTreeSpecToCSI(volumeSpec, dswp.intreeToCSITranslator) volumeSpec, err = csimigration.TranslateInTreeSpecToCSI(volumeSpec, pod.Namespace, dswp.intreeToCSITranslator)
if err != nil { if err != nil {
return nil, nil, "", err return nil, nil, "", err
} }
@ -595,7 +595,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
return nil, nil, "", err return nil, nil, "", err
} }
if migratable { if migratable {
spec, err = csimigration.TranslateInTreeSpecToCSI(spec, dswp.intreeToCSITranslator) spec, err = csimigration.TranslateInTreeSpecToCSI(spec, pod.Namespace, dswp.intreeToCSITranslator)
if err != nil { if err != nil {
return nil, nil, "", err return nil, nil, "", err
} }


@ -216,3 +216,31 @@ func endpointsIPFamily(endpoints *v1.Endpoints) (*v1.IPFamily, error) {
return &ipv4, nil return &ipv4, nil
} }
// OnNodeAdd is called whenever creation of new node object is observed.
func (proxier *metaProxier) OnNodeAdd(node *v1.Node) {
proxier.ipv4Proxier.OnNodeAdd(node)
proxier.ipv6Proxier.OnNodeAdd(node)
}
// OnNodeUpdate is called whenever modification of an existing
// node object is observed.
func (proxier *metaProxier) OnNodeUpdate(oldNode, node *v1.Node) {
proxier.ipv4Proxier.OnNodeUpdate(oldNode, node)
proxier.ipv6Proxier.OnNodeUpdate(oldNode, node)
}
// OnNodeDelete is called whenever deletion of an existing node
// object is observed.
func (proxier *metaProxier) OnNodeDelete(node *v1.Node) {
proxier.ipv4Proxier.OnNodeDelete(node)
proxier.ipv6Proxier.OnNodeDelete(node)
}
// OnNodeSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
func (proxier *metaProxier) OnNodeSynced() {
proxier.ipv4Proxier.OnNodeSynced()
proxier.ipv6Proxier.OnNodeSynced()
}


@ -49,15 +49,15 @@ func FilterEndpoints(endpoints []Endpoint, svcInfo ServicePort, nodeLabels map[s
// filterEndpointsWithHints provides filtering based on the hints included in // filterEndpointsWithHints provides filtering based on the hints included in
// EndpointSlices. If any of the following are true, the full list of endpoints // EndpointSlices. If any of the following are true, the full list of endpoints
// will be returned without any filtering: // will be returned without any filtering:
// * The AnnotationTopologyAwareHints annotation is not set to "auto" for this // * The AnnotationTopologyAwareHints annotation is not set to "Auto" for this
// Service. // Service.
// * No zone is specified in node labels. // * No zone is specified in node labels.
// * No endpoints for this Service have a hint pointing to the zone this // * No endpoints for this Service have a hint pointing to the zone this
// instance of kube-proxy is running in. // instance of kube-proxy is running in.
// * One or more endpoints for this Service do not have hints specified. // * One or more endpoints for this Service do not have hints specified.
func filterEndpointsWithHints(endpoints []Endpoint, hintsAnnotation string, nodeLabels map[string]string) []Endpoint { func filterEndpointsWithHints(endpoints []Endpoint, hintsAnnotation string, nodeLabels map[string]string) []Endpoint {
if hintsAnnotation != "auto" { if hintsAnnotation != "Auto" && hintsAnnotation != "auto" {
if hintsAnnotation != "" && hintsAnnotation != "disabled" { if hintsAnnotation != "" && hintsAnnotation != "Disabled" && hintsAnnotation != "disabled" {
klog.Warningf("Skipping topology aware endpoint filtering since Service has unexpected value for %s annotation: %s", v1.AnnotationTopologyAwareHints, hintsAnnotation) klog.Warningf("Skipping topology aware endpoint filtering since Service has unexpected value for %s annotation: %s", v1.AnnotationTopologyAwareHints, hintsAnnotation)
} }
return endpoints return endpoints


@ -198,9 +198,10 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
} }
if shareName == "" { if shareName == "" {
// File share name has a length limit of 63, and it cannot contain two consecutive '-'s. // File share name has a length limit of 63, it cannot contain two consecutive '-'s, and all letters must be lower case.
name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63) name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63)
shareName = strings.Replace(name, "--", "-", -1) shareName = strings.Replace(name, "--", "-", -1)
shareName = strings.ToLower(shareName)
} }
if resourceGroup == "" { if resourceGroup == "" {


@ -122,19 +122,19 @@ func (pm PluginManager) IsMigratable(spec *volume.Spec) (bool, error) {
// from references to in-tree plugins to migrated CSI plugins // from references to in-tree plugins to migrated CSI plugins
type InTreeToCSITranslator interface { type InTreeToCSITranslator interface {
TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error)
} }
// TranslateInTreeSpecToCSI translates a volume spec (either PV or inline volume) // TranslateInTreeSpecToCSI translates a volume spec (either PV or inline volume)
// supported by an in-tree plugin to CSI // supported by an in-tree plugin to CSI
func TranslateInTreeSpecToCSI(spec *volume.Spec, translator InTreeToCSITranslator) (*volume.Spec, error) { func TranslateInTreeSpecToCSI(spec *volume.Spec, podNamespace string, translator InTreeToCSITranslator) (*volume.Spec, error) {
var csiPV *v1.PersistentVolume var csiPV *v1.PersistentVolume
var err error var err error
inlineVolume := false inlineVolume := false
if spec.PersistentVolume != nil { if spec.PersistentVolume != nil {
csiPV, err = translator.TranslateInTreePVToCSI(spec.PersistentVolume) csiPV, err = translator.TranslateInTreePVToCSI(spec.PersistentVolume)
} else if spec.Volume != nil { } else if spec.Volume != nil {
csiPV, err = translator.TranslateInTreeInlineVolumeToCSI(spec.Volume) csiPV, err = translator.TranslateInTreeInlineVolumeToCSI(spec.Volume, podNamespace)
inlineVolume = true inlineVolume = true
} else { } else {
err = errors.New("not a valid volume spec") err = errors.New("not a valid volume spec")


@ -58,7 +58,7 @@ type InTreeToCSITranslator interface {
GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error) GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error)
GetCSINameFromInTreeName(pluginName string) (string, error) GetCSINameFromInTreeName(pluginName string) (string, error)
TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error)
} }
var _ OperationGenerator = &operationGenerator{} var _ OperationGenerator = &operationGenerator{}


@ -198,6 +198,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ephemeral-volume-controller"}, ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ephemeral-volume-controller"},
Rules: []rbacv1.PolicyRule{ Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), rbacv1helpers.NewRule("get", "list", "watch", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
eventsRule(), eventsRule(),
}, },


@ -479,7 +479,7 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro
az.Config = *config az.Config = *config
az.Environment = *env az.Environment = *env
az.ResourceRequestBackoff = resourceRequestBackoff az.ResourceRequestBackoff = resourceRequestBackoff
az.metadata, err = NewInstanceMetadataService(metadataURL) az.metadata, err = NewInstanceMetadataService(imdsServer)
if err != nil { if err != nil {
return err return err
} }


@ -275,6 +275,9 @@ func (az *Cloud) ListLB(service *v1.Service) ([]network.LoadBalancer, error) {
rgName := az.getLoadBalancerResourceGroup() rgName := az.getLoadBalancerResourceGroup()
allLBs, rerr := az.LoadBalancerClient.List(ctx, rgName) allLBs, rerr := az.LoadBalancerClient.List(ctx, rgName)
if rerr != nil { if rerr != nil {
if rerr.IsNotFound() {
return nil, nil
}
az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", rerr.Error().Error()) az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", rerr.Error().Error())
klog.Errorf("LoadBalancerClient.List(%v) failure with err=%v", rgName, rerr) klog.Errorf("LoadBalancerClient.List(%v) failure with err=%v", rgName, rerr)
return nil, rerr.Error() return nil, rerr.Error()
@ -290,6 +293,9 @@ func (az *Cloud) ListPIP(service *v1.Service, pipResourceGroup string) ([]networ
allPIPs, rerr := az.PublicIPAddressesClient.List(ctx, pipResourceGroup) allPIPs, rerr := az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
if rerr != nil { if rerr != nil {
if rerr.IsNotFound() {
return nil, nil
}
az.Event(service, v1.EventTypeWarning, "ListPublicIPs", rerr.Error().Error()) az.Event(service, v1.EventTypeWarning, "ListPublicIPs", rerr.Error().Error())
klog.Errorf("PublicIPAddressesClient.List(%v) failure with err=%v", pipResourceGroup, rerr) klog.Errorf("PublicIPAddressesClient.List(%v) failure with err=%v", pipResourceGroup, rerr)
return nil, rerr.Error() return nil, rerr.Error()


@ -25,13 +25,18 @@ import (
"net/http" "net/http"
"time" "time"
"k8s.io/klog/v2"
azcache "k8s.io/legacy-cloud-providers/azure/cache" azcache "k8s.io/legacy-cloud-providers/azure/cache"
) )
const ( const (
metadataCacheTTL = time.Minute metadataCacheTTL = time.Minute
metadataCacheKey = "InstanceMetadata" metadataCacheKey = "InstanceMetadata"
metadataURL = "http://169.254.169.254/metadata/instance" imdsInstanceAPIVersion = "2019-03-11"
imdsLoadBalancerAPIVersion = "2020-10-01"
imdsServer = "http://169.254.169.254"
imdsInstanceURI = "/metadata/instance"
imdsLoadBalancerURI = "/metadata/loadbalancer"
) )
// NetworkMetadata contains metadata about an instance's network // NetworkMetadata contains metadata about an instance's network
@ -86,19 +91,35 @@ type InstanceMetadata struct {
Network *NetworkMetadata `json:"network,omitempty"` Network *NetworkMetadata `json:"network,omitempty"`
} }
// PublicIPMetadata represents the public IP metadata.
type PublicIPMetadata struct {
FrontendIPAddress string `json:"frontendIpAddress,omitempty"`
PrivateIPAddress string `json:"privateIpAddress,omitempty"`
}
// LoadbalancerProfile represents load balancer profile in IMDS.
type LoadbalancerProfile struct {
PublicIPAddresses []PublicIPMetadata `json:"publicIpAddresses,omitempty"`
}
// LoadBalancerMetadata represents load balancer metadata.
type LoadBalancerMetadata struct {
LoadBalancer *LoadbalancerProfile `json:"loadbalancer,omitempty"`
}
// InstanceMetadataService knows how to query the Azure instance metadata server. // InstanceMetadataService knows how to query the Azure instance metadata server.
type InstanceMetadataService struct { type InstanceMetadataService struct {
metadataURL string imdsServer string
imsCache *azcache.TimedCache imsCache *azcache.TimedCache
} }
// NewInstanceMetadataService creates an instance of the InstanceMetadataService accessor object. // NewInstanceMetadataService creates an instance of the InstanceMetadataService accessor object.
func NewInstanceMetadataService(metadataURL string) (*InstanceMetadataService, error) { func NewInstanceMetadataService(idmsServer string) (*InstanceMetadataService, error) {
ims := &InstanceMetadataService{ ims := &InstanceMetadataService{
metadataURL: metadataURL, imdsServer: idmsServer,
} }
imsCache, err := azcache.NewTimedcache(metadataCacheTTL, ims.getInstanceMetadata) imsCache, err := azcache.NewTimedcache(metadataCacheTTL, ims.getMetadata)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -107,8 +128,52 @@ func NewInstanceMetadataService(metadataURL string) (*InstanceMetadataService, e
return ims, nil return ims, nil
} }
func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{}, error) { func (ims *InstanceMetadataService) getMetadata(key string) (interface{}, error) {
req, err := http.NewRequest("GET", ims.metadataURL, nil) instanceMetadata, err := ims.getInstanceMetadata(key)
if err != nil {
return nil, err
}
if instanceMetadata.Network != nil && len(instanceMetadata.Network.Interface) > 0 {
netInterface := instanceMetadata.Network.Interface[0]
if (len(netInterface.IPV4.IPAddress) > 0 && len(netInterface.IPV4.IPAddress[0].PublicIP) > 0) ||
(len(netInterface.IPV6.IPAddress) > 0 && len(netInterface.IPV6.IPAddress[0].PublicIP) > 0) {
// Return if the public IP address is already part of the instance metadata.
return instanceMetadata, nil
}
loadBalancerMetadata, err := ims.getLoadBalancerMetadata()
if err != nil || loadBalancerMetadata == nil || loadBalancerMetadata.LoadBalancer == nil {
// Log a warning since loadbalancer metadata may not be available when the VM
// is not in a standard LoadBalancer backend address pool.
klog.V(4).Infof("Warning: failed to get loadbalancer metadata: %v", err)
return instanceMetadata, nil
}
publicIPs := loadBalancerMetadata.LoadBalancer.PublicIPAddresses
if len(netInterface.IPV4.IPAddress) > 0 && len(netInterface.IPV4.IPAddress[0].PrivateIP) > 0 {
for _, pip := range publicIPs {
if pip.PrivateIPAddress == netInterface.IPV4.IPAddress[0].PrivateIP {
netInterface.IPV4.IPAddress[0].PublicIP = pip.FrontendIPAddress
break
}
}
}
if len(netInterface.IPV6.IPAddress) > 0 && len(netInterface.IPV6.IPAddress[0].PrivateIP) > 0 {
for _, pip := range publicIPs {
if pip.PrivateIPAddress == netInterface.IPV6.IPAddress[0].PrivateIP {
netInterface.IPV6.IPAddress[0].PublicIP = pip.FrontendIPAddress
break
}
}
}
}
return instanceMetadata, nil
}
func (ims *InstanceMetadataService) getInstanceMetadata(key string) (*InstanceMetadata, error) {
req, err := http.NewRequest("GET", ims.imdsServer+imdsInstanceURI, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -117,7 +182,7 @@ func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{}
q := req.URL.Query() q := req.URL.Query()
q.Add("format", "json") q.Add("format", "json")
q.Add("api-version", "2019-03-11") q.Add("api-version", imdsInstanceAPIVersion)
req.URL.RawQuery = q.Encode() req.URL.RawQuery = q.Encode()
client := &http.Client{} client := &http.Client{}
@ -145,6 +210,44 @@ func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{}
return &obj, nil return &obj, nil
} }
func (ims *InstanceMetadataService) getLoadBalancerMetadata() (*LoadBalancerMetadata, error) {
req, err := http.NewRequest("GET", ims.imdsServer+imdsLoadBalancerURI, nil)
if err != nil {
return nil, err
}
req.Header.Add("Metadata", "True")
req.Header.Add("User-Agent", "golang/kubernetes-cloud-provider")
q := req.URL.Query()
q.Add("format", "json")
q.Add("api-version", imdsLoadBalancerAPIVersion)
req.URL.RawQuery = q.Encode()
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failure of getting loadbalancer metadata with response %q", resp.Status)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
obj := LoadBalancerMetadata{}
err = json.Unmarshal(data, &obj)
if err != nil {
return nil, err
}
return &obj, nil
}
// GetMetadata gets instance metadata from cache. // GetMetadata gets instance metadata from cache.
// crt determines if we can get data from stalled cache/need fresh if cache expired. // crt determines if we can get data from stalled cache/need fresh if cache expired.
func (ims *InstanceMetadataService) GetMetadata(crt azcache.AzureCacheReadType) (*InstanceMetadata, error) { func (ims *InstanceMetadataService) GetMetadata(crt azcache.AzureCacheReadType) (*InstanceMetadata, error) {


@ -650,11 +650,19 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
serviceName := getServiceName(service) serviceName := getServiceName(service)
var changed bool
if existsPip { if existsPip {
// ensure that the service tag is good // ensure that the service tag is good for managed pips
changed, err := bindServicesToPIP(&pip, []string{serviceName}, false) owns, isUserAssignedPIP := serviceOwnsPublicIP(service, &pip, clusterName)
if err != nil { if owns && !isUserAssignedPIP {
return nil, err changed, err = bindServicesToPIP(&pip, []string{serviceName}, false)
if err != nil {
return nil, err
}
}
if pip.Tags == nil {
pip.Tags = make(map[string]*string)
} }
// return if pip exist and dns label is the same // return if pip exist and dns label is the same
@ -2091,7 +2099,12 @@ func deduplicate(collection *[]string) *[]string {
} }
// Determine if we should release existing owned public IPs // Determine if we should release existing owned public IPs
func shouldReleaseExistingOwnedPublicIP(existingPip *network.PublicIPAddress, lbShouldExist, lbIsInternal bool, desiredPipName, svcName string, ipTagRequest serviceIPTagRequest) bool { func shouldReleaseExistingOwnedPublicIP(existingPip *network.PublicIPAddress, lbShouldExist, lbIsInternal, isUserAssignedPIP bool, desiredPipName string, ipTagRequest serviceIPTagRequest) bool {
// skip deleting user created pip
if isUserAssignedPIP {
return false
}
// Latch some variables for readability purposes. // Latch some variables for readability purposes.
pipName := *(*existingPip).Name pipName := *(*existingPip).Name
@ -2214,9 +2227,10 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa
// Now, let's perform additional analysis to determine if we should release the public ips we have found. // Now, let's perform additional analysis to determine if we should release the public ips we have found.
// We can only let them go if (a) they are owned by this service and (b) they meet the criteria for deletion. // We can only let them go if (a) they are owned by this service and (b) they meet the criteria for deletion.
if serviceOwnsPublicIP(&pip, clusterName, serviceName) { owns, isUserAssignedPIP := serviceOwnsPublicIP(service, &pip, clusterName)
if owns {
var dirtyPIP, toBeDeleted bool var dirtyPIP, toBeDeleted bool
if !wantLb { if !wantLb && !isUserAssignedPIP {
klog.V(2).Infof("reconcilePublicIP for service(%s): unbinding the service from pip %s", serviceName, *pip.Name) klog.V(2).Infof("reconcilePublicIP for service(%s): unbinding the service from pip %s", serviceName, *pip.Name)
err = unbindServiceFromPIP(&pip, serviceName) err = unbindServiceFromPIP(&pip, serviceName)
if err != nil { if err != nil {
@ -2228,7 +2242,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa
if changed { if changed {
dirtyPIP = true dirtyPIP = true
} }
if shouldReleaseExistingOwnedPublicIP(&pip, wantLb, isInternal, desiredPipName, serviceName, serviceIPTagRequest) { if shouldReleaseExistingOwnedPublicIP(&pip, wantLb, isInternal, isUserAssignedPIP, desiredPipName, serviceIPTagRequest) {
// Then, release the public ip // Then, release the public ip
pipsToBeDeleted = append(pipsToBeDeleted, &pip) pipsToBeDeleted = append(pipsToBeDeleted, &pip)
@ -2549,26 +2563,55 @@ func getServiceTags(service *v1.Service) []string {
return nil return nil
} }
func serviceOwnsPublicIP(pip *network.PublicIPAddress, clusterName, serviceName string) bool { // serviceOwnsPublicIP checks if the service owns the pip and if the pip is user-created.
if pip != nil && pip.Tags != nil { // The pip is user-created if and only if there are no service tags.
// The service owns the pip if:
// 1. The serviceName is included in the service tags of a system-created pip.
// 2. The service.Spec.LoadBalancerIP matches the IP address of a user-created pip.
func serviceOwnsPublicIP(service *v1.Service, pip *network.PublicIPAddress, clusterName string) (bool, bool) {
if service == nil || pip == nil {
klog.Warningf("serviceOwnsPublicIP: nil service or public IP")
return false, false
}
if pip.PublicIPAddressPropertiesFormat == nil || to.String(pip.IPAddress) == "" {
klog.Warningf("serviceOwnsPublicIP: empty pip.IPAddress")
return false, false
}
serviceName := getServiceName(service)
if pip.Tags != nil {
serviceTag := pip.Tags[serviceTagKey] serviceTag := pip.Tags[serviceTagKey]
clusterTag := pip.Tags[clusterNameKey] clusterTag := pip.Tags[clusterNameKey]
if serviceTag != nil && isSVCNameInPIPTag(*serviceTag, serviceName) { // if there is no service tag on the pip, it is a user-created pip
// Backward compatible for clusters upgraded from old releases. if to.String(serviceTag) == "" {
// In such case, only "service" tag is set. return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), true
if clusterTag == nil { }
return true
}
// If cluster name tag is set, then return true if it matches. if serviceTag != nil {
if *clusterTag == clusterName { // if there is service tag on the pip, it is system-created pip
return true if isSVCNameInPIPTag(*serviceTag, serviceName) {
// Backward compatible for clusters upgraded from old releases.
// In such case, only "service" tag is set.
if clusterTag == nil {
return true, false
}
// If cluster name tag is set, then return true if it matches.
if *clusterTag == clusterName {
return true, false
}
} else {
// if the service is not included in the tags of the system-created pip, check the ip address
// this could happen for secondary services
return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), false
} }
} }
} }
return false return false, false
} }
func isSVCNameInPIPTag(tag, svcName string) bool { func isSVCNameInPIPTag(tag, svcName string) bool {


@ -122,6 +122,10 @@ func removePathIfNotMountPoint(mountPath string, mounter Interface, extensiveMou
} }
if err != nil { if err != nil {
if os.IsNotExist(err) {
klog.V(4).Infof("%q does not exist", mountPath)
return true, nil
}
return notMnt, err return notMnt, err
} }


@ -52,7 +52,7 @@ func IsCorruptedMnt(err error) bool {
underlyingError = pe.Err underlyingError = pe.Err
} }
return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES || underlyingError == syscall.EHOSTDOWN
} }
// MountInfo represents a single line in /proc/<pid>/mountinfo. // MountInfo represents a single line in /proc/<pid>/mountinfo.
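For illustration, a standalone sketch of the errno check with EHOSTDOWN included. It unwraps only *os.PathError via errors.As, which is narrower than the type switch in the vendored helper, and assumes a Unix syscall package.

package main

import (
    "errors"
    "fmt"
    "os"
    "syscall"
)

// isCorruptedMnt reports whether err indicates a corrupted mount point,
// now also treating EHOSTDOWN as such an indicator.
func isCorruptedMnt(err error) bool {
    var pe *os.PathError
    if errors.As(err, &pe) {
        err = pe.Err
    }
    switch err {
    case syscall.ENOTCONN, syscall.ESTALE, syscall.EIO, syscall.EACCES, syscall.EHOSTDOWN:
        return true
    }
    return false
}

func main() {
    err := &os.PathError{Op: "stat", Path: "/mnt/nfs", Err: syscall.EHOSTDOWN}
    fmt.Println(isCorruptedMnt(err)) // true
}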


@ -38,7 +38,8 @@ import (
// ERROR_BAD_NET_NAME = 67 // ERROR_BAD_NET_NAME = 67
// ERROR_SESSION_CREDENTIAL_CONFLICT = 1219 // ERROR_SESSION_CREDENTIAL_CONFLICT = 1219
// ERROR_LOGON_FAILURE = 1326 // ERROR_LOGON_FAILURE = 1326
var errorNoList = [...]int{53, 54, 59, 64, 65, 66, 67, 1219, 1326} // WSAEHOSTDOWN = 10064
var errorNoList = [...]int{53, 54, 59, 64, 65, 66, 67, 1219, 1326, 10064}
// IsCorruptedMnt return true if err is about corrupted mount point // IsCorruptedMnt return true if err is about corrupted mount point
func IsCorruptedMnt(err error) bool { func IsCorruptedMnt(err error) bool {


@ -20,6 +20,8 @@ package mount
import ( import (
"fmt" "fmt"
"strconv"
"strings"
"k8s.io/klog/v2" "k8s.io/klog/v2"
utilexec "k8s.io/utils/exec" utilexec "k8s.io/utils/exec"
@ -99,3 +101,117 @@ func (resizefs *ResizeFs) btrfsResize(deviceMountPath string) (bool, error) {
resizeError := fmt.Errorf("resize of device %s failed: %v. btrfs output: %s", deviceMountPath, err, string(output)) resizeError := fmt.Errorf("resize of device %s failed: %v. btrfs output: %s", deviceMountPath, err, string(output))
return false, resizeError return false, resizeError
} }
func (resizefs *ResizeFs) NeedResize(devicePath string, deviceMountPath string) (bool, error) {
deviceSize, err := resizefs.getDeviceSize(devicePath)
if err != nil {
return false, err
}
var fsSize, blockSize uint64
format, err := getDiskFormat(resizefs.exec, devicePath)
if err != nil {
formatErr := fmt.Errorf("ResizeFS.Resize - error checking format for device %s: %v", devicePath, err)
return false, formatErr
}
// If disk has no format, there is no need to resize the disk because mkfs.*
// by default will use whole disk anyways.
if format == "" {
return false, nil
}
klog.V(3).Infof("ResizeFs.needResize - checking mounted volume %s", devicePath)
switch format {
case "ext3", "ext4":
blockSize, fsSize, err = resizefs.getExtSize(devicePath)
klog.V(5).Infof("Ext size: filesystem size=%d, block size=%d", fsSize, blockSize)
case "xfs":
blockSize, fsSize, err = resizefs.getXFSSize(deviceMountPath)
klog.V(5).Infof("Xfs size: filesystem size=%d, block size=%d, err=%v", fsSize, blockSize, err)
default:
klog.Errorf("Not able to parse given filesystem info. fsType: %s, will not resize", format)
return false, fmt.Errorf("Could not parse fs info on given filesystem format: %s. Supported fs types are: xfs, ext3, ext4", format)
}
if err != nil {
return false, err
}
// Tolerate one block difference, just in case of rounding errors somewhere.
klog.V(5).Infof("Volume %s: device size=%d, filesystem size=%d, block size=%d", devicePath, deviceSize, fsSize, blockSize)
if deviceSize <= fsSize+blockSize {
return false, nil
}
return true, nil
}
func (resizefs *ResizeFs) getDeviceSize(devicePath string) (uint64, error) {
output, err := resizefs.exec.Command("blockdev", "--getsize64", devicePath).CombinedOutput()
outStr := strings.TrimSpace(string(output))
if err != nil {
return 0, fmt.Errorf("failed to read size of device %s: %s: %s", devicePath, err, outStr)
}
size, err := strconv.ParseUint(outStr, 10, 64)
if err != nil {
return 0, fmt.Errorf("failed to parse size of device %s %s: %s", devicePath, outStr, err)
}
return size, nil
}
func (resizefs *ResizeFs) getExtSize(devicePath string) (uint64, uint64, error) {
output, err := resizefs.exec.Command("dumpe2fs", "-h", devicePath).CombinedOutput()
if err != nil {
return 0, 0, fmt.Errorf("failed to read size of filesystem on %s: %s: %s", devicePath, err, string(output))
}
blockSize, blockCount, _ := resizefs.parseFsInfoOutput(string(output), ":", "block size", "block count")
if blockSize == 0 {
return 0, 0, fmt.Errorf("could not find block size of device %s", devicePath)
}
if blockCount == 0 {
return 0, 0, fmt.Errorf("could not find block count of device %s", devicePath)
}
return blockSize, blockSize * blockCount, nil
}
func (resizefs *ResizeFs) getXFSSize(devicePath string) (uint64, uint64, error) {
output, err := resizefs.exec.Command("xfs_io", "-c", "statfs", devicePath).CombinedOutput()
if err != nil {
return 0, 0, fmt.Errorf("failed to read size of filesystem on %s: %s: %s", devicePath, err, string(output))
}
blockSize, blockCount, _ := resizefs.parseFsInfoOutput(string(output), "=", "geom.bsize", "geom.datablocks")
if blockSize == 0 {
return 0, 0, fmt.Errorf("could not find block size of device %s", devicePath)
}
if blockCount == 0 {
return 0, 0, fmt.Errorf("could not find block count of device %s", devicePath)
}
return blockSize, blockSize * blockCount, nil
}
func (resizefs *ResizeFs) parseFsInfoOutput(cmdOutput string, spliter string, blockSizeKey string, blockCountKey string) (uint64, uint64, error) {
lines := strings.Split(cmdOutput, "\n")
var blockSize, blockCount uint64
var err error
for _, line := range lines {
tokens := strings.Split(line, spliter)
if len(tokens) != 2 {
continue
}
key, value := strings.ToLower(strings.TrimSpace(tokens[0])), strings.ToLower(strings.TrimSpace(tokens[1]))
if key == blockSizeKey {
blockSize, err = strconv.ParseUint(value, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("failed to parse block size %s: %s", value, err)
}
}
if key == blockCountKey {
blockCount, err = strconv.ParseUint(value, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("failed to parse block count %s: %s", value, err)
}
}
}
return blockSize, blockCount, err
}
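A hedged usage sketch for the new NeedResize helper: check whether the filesystem is smaller than the block device before calling Resize. It assumes the upstream k8s.io/mount-utils and k8s.io/utils/exec APIs on Linux; the device and mount paths are placeholders.

package main

import (
    "fmt"

    mount "k8s.io/mount-utils"
    utilexec "k8s.io/utils/exec"
)

func main() {
    // NewResizeFs takes a k8s.io/utils/exec Interface in the upstream package.
    resizer := mount.NewResizeFs(utilexec.New())

    devicePath := "/dev/sdc"                 // placeholder block device
    deviceMountPath := "/var/lib/kubelet/vol" // placeholder mount point

    need, err := resizer.NeedResize(devicePath, deviceMountPath)
    if err != nil {
        fmt.Println("NeedResize failed:", err)
        return
    }
    if !need {
        fmt.Println("filesystem already matches the device size; skipping resize")
        return
    }
    if _, err := resizer.Resize(devicePath, deviceMountPath); err != nil {
        fmt.Println("Resize failed:", err)
    }
}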

vendor/modules.txt

@ -1441,7 +1441,7 @@ gopkg.in/warnings.v0
gopkg.in/yaml.v2 gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
gopkg.in/yaml.v3 gopkg.in/yaml.v3
# k8s.io/api v0.21.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.0-k3s1 # k8s.io/api v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1
## explicit ## explicit
k8s.io/api/admission/v1 k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1 k8s.io/api/admission/v1beta1
@ -1489,7 +1489,7 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1 k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1 k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.0-k3s1 # k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@ -1529,7 +1529,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.21.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.0-k3s1 # k8s.io/apimachinery v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1
## explicit ## explicit
k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/errors
@ -1594,7 +1594,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.21.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.0-k3s1 # k8s.io/apiserver v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1
## explicit ## explicit
k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/configuration
@ -1733,11 +1733,11 @@ k8s.io/apiserver/plugin/pkg/audit/webhook
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.0-k3s1 # k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1
k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.0-k3s1 # k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1
## explicit ## explicit
k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
@ -2025,7 +2025,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.21.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.0-k3s1 # k8s.io/cloud-provider v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1
## explicit ## explicit
k8s.io/cloud-provider k8s.io/cloud-provider
k8s.io/cloud-provider/api k8s.io/cloud-provider/api
@ -2047,13 +2047,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.0-k3s1 # k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1
k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.19.7 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.0-k3s1 # k8s.io/code-generator v0.19.7 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1
k8s.io/code-generator/cmd/client-gen/args k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake k8s.io/code-generator/cmd/client-gen/generators/fake
@ -2068,7 +2068,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.21.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.0-k3s1 # k8s.io/component-base v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1
## explicit ## explicit
k8s.io/component-base/cli/flag k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag k8s.io/component-base/cli/globalflag
@ -2094,7 +2094,7 @@ k8s.io/component-base/metrics/testutil
k8s.io/component-base/term k8s.io/component-base/term
k8s.io/component-base/version k8s.io/component-base/version
k8s.io/component-base/version/verflag k8s.io/component-base/version/verflag
# k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.0-k3s1 # k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1
k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/apimachinery/lease
k8s.io/component-helpers/apps/poddisruptionbudget k8s.io/component-helpers/apps/poddisruptionbudget
k8s.io/component-helpers/auth/rbac/reconciliation k8s.io/component-helpers/auth/rbac/reconciliation
@ -2103,7 +2103,7 @@ k8s.io/component-helpers/node/topology
k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1
k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/scheduling/corev1/nodeaffinity
k8s.io/component-helpers/storage/volume k8s.io/component-helpers/storage/volume
# k8s.io/controller-manager v0.21.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.0-k3s1 # k8s.io/controller-manager v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1
## explicit ## explicit
k8s.io/controller-manager/app k8s.io/controller-manager/app
k8s.io/controller-manager/config k8s.io/controller-manager/config
@ -2116,11 +2116,11 @@ k8s.io/controller-manager/pkg/informerfactory
k8s.io/controller-manager/pkg/leadermigration k8s.io/controller-manager/pkg/leadermigration
k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/config
k8s.io/controller-manager/pkg/leadermigration/options k8s.io/controller-manager/pkg/leadermigration/options
# k8s.io/cri-api v0.21.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.0-k3s1 # k8s.io/cri-api v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1
## explicit ## explicit
k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.0-k3s1 # k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1
k8s.io/csi-translation-lib k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 # k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027
@ -2138,7 +2138,7 @@ k8s.io/heapster/metrics/api/v1/types
k8s.io/klog k8s.io/klog
# k8s.io/klog/v2 v2.8.0 # k8s.io/klog/v2 v2.8.0
k8s.io/klog/v2 k8s.io/klog/v2
# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.0-k3s1 # k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@ -2166,7 +2166,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.0-k3s1 # k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1
k8s.io/kube-controller-manager/config/v1alpha1 k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 # k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
k8s.io/kube-openapi/pkg/aggregator k8s.io/kube-openapi/pkg/aggregator
@ -2182,13 +2182,13 @@ k8s.io/kube-openapi/pkg/validation/spec
k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt
k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/strfmt/bson
k8s.io/kube-openapi/pkg/validation/validate k8s.io/kube-openapi/pkg/validation/validate
# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.0-k3s1 # k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1
k8s.io/kube-proxy/config/v1alpha1 k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.0-k3s1 # k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1
k8s.io/kube-scheduler/config/v1 k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1beta1 k8s.io/kube-scheduler/config/v1beta1
k8s.io/kube-scheduler/extender/v1 k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.21.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.0-k3s1 # k8s.io/kubectl v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1
## explicit ## explicit
k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd k8s.io/kubectl/pkg/cmd
@ -2267,7 +2267,7 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.0-k3s1 # k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1
k8s.io/kubelet/config/v1alpha1 k8s.io/kubelet/config/v1alpha1
k8s.io/kubelet/config/v1beta1 k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis k8s.io/kubelet/pkg/apis
@ -2279,7 +2279,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1
k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1
k8s.io/kubelet/pkg/apis/podresources/v1alpha1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1
k8s.io/kubelet/pkg/apis/stats/v1alpha1 k8s.io/kubelet/pkg/apis/stats/v1alpha1
# k8s.io/kubernetes v1.21.0 => github.com/k3s-io/kubernetes v1.21.0-k3s1 # k8s.io/kubernetes v1.21.1 => github.com/k3s-io/kubernetes v1.21.1-k3s1
## explicit ## explicit
k8s.io/kubernetes/cmd/kube-apiserver/app k8s.io/kubernetes/cmd/kube-apiserver/app
k8s.io/kubernetes/cmd/kube-apiserver/app/options k8s.io/kubernetes/cmd/kube-apiserver/app/options
@ -3007,7 +3007,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.0-k3s1 # k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1
k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth k8s.io/legacy-cloud-providers/azure/auth
@ -3050,7 +3050,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.0-k3s1 # k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1
k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
@ -3066,7 +3066,7 @@ k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1
k8s.io/metrics/pkg/client/custom_metrics k8s.io/metrics/pkg/client/custom_metrics
k8s.io/metrics/pkg/client/custom_metrics/scheme k8s.io/metrics/pkg/client/custom_metrics/scheme
k8s.io/metrics/pkg/client/external_metrics k8s.io/metrics/pkg/client/external_metrics
# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.0-k3s1 # k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1
k8s.io/mount-utils k8s.io/mount-utils
# k8s.io/utils v0.0.0-20201110183641-67b214c5f920 # k8s.io/utils v0.0.0-20201110183641-67b214c5f920
## explicit ## explicit
@ -3088,7 +3088,7 @@ k8s.io/utils/trace
# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
# sigs.k8s.io/kustomize/api v0.8.5 # sigs.k8s.io/kustomize/api v0.8.8
sigs.k8s.io/kustomize/api/builtins sigs.k8s.io/kustomize/api/builtins
sigs.k8s.io/kustomize/api/filesys sigs.k8s.io/kustomize/api/filesys
sigs.k8s.io/kustomize/api/filters/annotations sigs.k8s.io/kustomize/api/filters/annotations
@ -3109,7 +3109,6 @@ sigs.k8s.io/kustomize/api/hasher
sigs.k8s.io/kustomize/api/ifc sigs.k8s.io/kustomize/api/ifc
sigs.k8s.io/kustomize/api/image sigs.k8s.io/kustomize/api/image
sigs.k8s.io/kustomize/api/internal/accumulator sigs.k8s.io/kustomize/api/internal/accumulator
sigs.k8s.io/kustomize/api/internal/conflict
sigs.k8s.io/kustomize/api/internal/generators sigs.k8s.io/kustomize/api/internal/generators
sigs.k8s.io/kustomize/api/internal/git sigs.k8s.io/kustomize/api/internal/git
sigs.k8s.io/kustomize/api/internal/kusterr sigs.k8s.io/kustomize/api/internal/kusterr
@ -3122,7 +3121,6 @@ sigs.k8s.io/kustomize/api/internal/plugins/utils
sigs.k8s.io/kustomize/api/internal/target sigs.k8s.io/kustomize/api/internal/target
sigs.k8s.io/kustomize/api/internal/utils sigs.k8s.io/kustomize/api/internal/utils
sigs.k8s.io/kustomize/api/internal/validate sigs.k8s.io/kustomize/api/internal/validate
sigs.k8s.io/kustomize/api/internal/wrappy
sigs.k8s.io/kustomize/api/konfig sigs.k8s.io/kustomize/api/konfig
sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts
sigs.k8s.io/kustomize/api/krusty sigs.k8s.io/kustomize/api/krusty
@ -3134,14 +3132,13 @@ sigs.k8s.io/kustomize/api/resid
sigs.k8s.io/kustomize/api/resmap sigs.k8s.io/kustomize/api/resmap
sigs.k8s.io/kustomize/api/resource sigs.k8s.io/kustomize/api/resource
sigs.k8s.io/kustomize/api/types sigs.k8s.io/kustomize/api/types
# sigs.k8s.io/kustomize/kustomize/v4 v4.0.5 # sigs.k8s.io/kustomize/kustomize/v4 v4.1.2
sigs.k8s.io/kustomize/kustomize/v4/commands/build sigs.k8s.io/kustomize/kustomize/v4/commands/build
# sigs.k8s.io/kustomize/kyaml v0.10.15 # sigs.k8s.io/kustomize/kyaml v0.10.17
sigs.k8s.io/kustomize/kyaml/comments sigs.k8s.io/kustomize/kyaml/comments
sigs.k8s.io/kustomize/kyaml/errors sigs.k8s.io/kustomize/kyaml/errors
sigs.k8s.io/kustomize/kyaml/ext sigs.k8s.io/kustomize/kyaml/ext
sigs.k8s.io/kustomize/kyaml/fieldmeta sigs.k8s.io/kustomize/kyaml/fieldmeta
sigs.k8s.io/kustomize/kyaml/filtersutil
sigs.k8s.io/kustomize/kyaml/fn/runtime/container sigs.k8s.io/kustomize/kyaml/fn/runtime/container
sigs.k8s.io/kustomize/kyaml/fn/runtime/exec sigs.k8s.io/kustomize/kyaml/fn/runtime/exec
sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil
@ -3205,32 +3202,32 @@ sigs.k8s.io/yaml
# google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 # google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884
# google.golang.org/grpc => google.golang.org/grpc v1.27.1 # google.golang.org/grpc => google.golang.org/grpc v1.27.1
# gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2 # gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.0-k3s1 # k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1
# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.0-k3s1 # k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1
# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.0-k3s1 # k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1
# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.0-k3s1 # k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1
# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.0-k3s1 # k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1
# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.0-k3s1 # k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1
# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.0-k3s1 # k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1
# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.0-k3s1 # k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1
# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.0-k3s1 # k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1
# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.0-k3s1 # k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1
# k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.0-k3s1 # k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1
# k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.0-k3s1 # k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1
# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.0-k3s1 # k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1
# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.0-k3s1 # k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1
# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.0-k3s1 # k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1
# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.0-k3s1 # k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1
# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.0-k3s1 # k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1
# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.0-k3s1 # k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1
# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.0-k3s1 # k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1
# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.0-k3s1 # k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1
# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.21.0-k3s1 # k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.21.1-k3s1
# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.0-k3s1 # k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1
# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.0-k3s1 # k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1
# k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.0-k3s1 # k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1
# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.21.0-k3s1 # k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.21.1-k3s1
# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.0-k3s1 # k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.1-k3s1
# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.21.0-k3s1 # k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.21.1-k3s1
# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.21.0-k3s1 # k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.21.1-k3s1
# mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 # mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7

View File

@ -11,7 +11,7 @@ import (
) )
type HashTransformerPlugin struct { type HashTransformerPlugin struct {
hasher ifc.KunstructuredHasher hasher ifc.KustHasher
} }
func (p *HashTransformerPlugin) Config( func (p *HashTransformerPlugin) Config(
@ -24,7 +24,7 @@ func (p *HashTransformerPlugin) Config(
func (p *HashTransformerPlugin) Transform(m resmap.ResMap) error { func (p *HashTransformerPlugin) Transform(m resmap.ResMap) error {
for _, res := range m.Resources() { for _, res := range m.Resources() {
if res.NeedHashSuffix() { if res.NeedHashSuffix() {
h, err := p.hasher.Hash(res) h, err := res.Hash(p.hasher)
if err != nil { if err != nil {
return err return err
} }

View File

@ -6,7 +6,6 @@ package builtins
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"io"
"io/ioutil" "io/ioutil"
"os" "os"
"os/exec" "os/exec"
@ -16,7 +15,6 @@ import (
"github.com/imdario/mergo" "github.com/imdario/mergo"
"github.com/pkg/errors" "github.com/pkg/errors"
"sigs.k8s.io/kustomize/api/filesys"
"sigs.k8s.io/kustomize/api/resmap" "sigs.k8s.io/kustomize/api/resmap"
"sigs.k8s.io/kustomize/api/types" "sigs.k8s.io/kustomize/api/types"
"sigs.k8s.io/yaml" "sigs.k8s.io/yaml"
@ -25,243 +23,285 @@ import (
// HelmChartInflationGeneratorPlugin is a plugin to generate resources // HelmChartInflationGeneratorPlugin is a plugin to generate resources
// from a remote or local helm chart. // from a remote or local helm chart.
type HelmChartInflationGeneratorPlugin struct { type HelmChartInflationGeneratorPlugin struct {
h *resmap.PluginHelpers h *resmap.PluginHelpers
types.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` types.HelmGlobals
runHelmCommand func([]string) ([]byte, error) types.HelmChart
types.HelmChartArgs
tmpDir string tmpDir string
} }
var KustomizePlugin HelmChartInflationGeneratorPlugin var KustomizePlugin HelmChartInflationGeneratorPlugin
const (
valuesMergeOptionMerge = "merge"
valuesMergeOptionOverride = "override"
valuesMergeOptionReplace = "replace"
)
var legalMergeOptions = []string{
valuesMergeOptionMerge,
valuesMergeOptionOverride,
valuesMergeOptionReplace,
}
// Config uses the input plugin configurations `config` to setup the generator // Config uses the input plugin configurations `config` to setup the generator
// options // options
func (p *HelmChartInflationGeneratorPlugin) Config(h *resmap.PluginHelpers, config []byte) error { func (p *HelmChartInflationGeneratorPlugin) Config(
h *resmap.PluginHelpers, config []byte) (err error) {
if h.GeneralConfig() == nil {
return fmt.Errorf("unable to access general config")
}
if !h.GeneralConfig().HelmConfig.Enabled {
return fmt.Errorf("must specify --enable-helm")
}
if h.GeneralConfig().HelmConfig.Command == "" {
return fmt.Errorf("must specify --helm-command")
}
p.h = h p.h = h
err := yaml.Unmarshal(config, p) if err = yaml.Unmarshal(config, p); err != nil {
if err != nil { return
return err
} }
tmpDir, err := filesys.NewTmpConfirmedDir() return p.validateArgs()
if err != nil {
return err
}
p.tmpDir = string(tmpDir)
if p.ChartName == "" {
return fmt.Errorf("chartName cannot be empty")
}
if p.ChartHome == "" {
p.ChartHome = filepath.Join(p.tmpDir, "chart")
}
if p.ChartRepoName == "" {
p.ChartRepoName = "stable"
}
if p.HelmBin == "" {
p.HelmBin = "helm"
}
if p.HelmHome == "" {
p.HelmHome = filepath.Join(p.tmpDir, ".helm")
}
if p.Values == "" {
p.Values = filepath.Join(p.ChartHome, p.ChartName, "values.yaml")
}
if p.ValuesMerge == "" {
p.ValuesMerge = "override"
}
// runHelmCommand will run `helm` command with args provided. Return stdout
// and error if there is any.
p.runHelmCommand = func(args []string) ([]byte, error) {
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
cmd := exec.Command(p.HelmBin, args...)
cmd.Stdout = stdout
cmd.Stderr = stderr
cmd.Env = append(cmd.Env,
fmt.Sprintf("HELM_CONFIG_HOME=%s", p.HelmHome),
fmt.Sprintf("HELM_CACHE_HOME=%s/.cache", p.HelmHome),
fmt.Sprintf("HELM_DATA_HOME=%s/.data", p.HelmHome),
)
err := cmd.Run()
if err != nil {
return stdout.Bytes(),
errors.Wrap(
fmt.Errorf("failed to run command %s %s", p.HelmBin, strings.Join(args, " ")),
stderr.String(),
)
}
return stdout.Bytes(), nil
}
return nil
} }
// EncodeValues for writing // This uses the real file system since tmpDir may be used
func (p *HelmChartInflationGeneratorPlugin) EncodeValues(w io.Writer) error { // by the helm subprocess. Cannot use a chroot jail or fake
d, err := yaml.Marshal(p.ValuesLocal) // filesystem since we allow the user to use previously
if err != nil { // downloaded charts. This is safe since this plugin is
return err // owned by kustomize.
} func (p *HelmChartInflationGeneratorPlugin) establishTmpDir() (err error) {
_, err = w.Write(d) if p.tmpDir != "" {
if err != nil { // already done.
return err
}
return nil
}
// useValuesLocal process (merge) inflator config provided values with chart default values.yaml
func (p *HelmChartInflationGeneratorPlugin) useValuesLocal() error {
// not override, merge, none
if !(p.ValuesMerge == "none" || p.ValuesMerge == "no" || p.ValuesMerge == "false") {
var pValues []byte
var err error
if filepath.IsAbs(p.Values) {
pValues, err = ioutil.ReadFile(p.Values)
} else {
pValues, err = p.h.Loader().Load(p.Values)
}
if err != nil {
return err
}
chValues := make(map[string]interface{})
err = yaml.Unmarshal(pValues, &chValues)
if err != nil {
return err
}
if p.ValuesMerge == "override" {
err = mergo.Merge(&chValues, p.ValuesLocal, mergo.WithOverride)
if err != nil {
return err
}
}
if p.ValuesMerge == "merge" {
err = mergo.Merge(&chValues, p.ValuesLocal)
if err != nil {
return err
}
}
p.ValuesLocal = chValues
}
b, err := yaml.Marshal(p.ValuesLocal)
if err != nil {
return err
}
path, err := p.writeValuesBytes(b)
if err != nil {
return err
}
p.Values = path
return nil
}
// copyValues will copy the relative values file into the temp directory
// to avoid messing up with CWD.
func (p *HelmChartInflationGeneratorPlugin) copyValues() error {
// only copy when the values path is not absolute
if filepath.IsAbs(p.Values) {
return nil return nil
} }
// we must use use loader to read values file p.tmpDir, err = ioutil.TempDir("", "kustomize-helm-")
b, err := p.h.Loader().Load(p.Values) return err
if err != nil { }
func (p *HelmChartInflationGeneratorPlugin) validateArgs() (err error) {
if p.Name == "" {
return fmt.Errorf("chart name cannot be empty")
}
// ChartHome might be consulted by the plugin (to read
// values files below it), so it must be located under
// the loader root (unless root restrictions are
// disabled, in which case this can be an absolute path).
if p.ChartHome == "" {
p.ChartHome = "charts"
}
// The ValuesFile may be consulted by the plugin, so it must
// be under the loader root (unless root restrictions are
// disabled).
if p.ValuesFile == "" {
p.ValuesFile = filepath.Join(p.ChartHome, p.Name, "values.yaml")
}
if err = p.errIfIllegalValuesMerge(); err != nil {
return err return err
} }
path, err := p.writeValuesBytes(b)
if err != nil { // ConfigHome is not loaded by the plugin, and can be located anywhere.
return err if p.ConfigHome == "" {
if err = p.establishTmpDir(); err != nil {
return errors.Wrap(
err, "unable to create tmp dir for HELM_CONFIG_HOME")
}
p.ConfigHome = filepath.Join(p.tmpDir, "helm")
} }
p.Values = path
return nil return nil
} }
func (p *HelmChartInflationGeneratorPlugin) writeValuesBytes(b []byte) (string, error) { func (p *HelmChartInflationGeneratorPlugin) errIfIllegalValuesMerge() error {
path := filepath.Join(p.ChartHome, p.ChartName, "kustomize-values.yaml") if p.ValuesMerge == "" {
err := ioutil.WriteFile(path, b, 0644) // Use the default.
p.ValuesMerge = valuesMergeOptionOverride
return nil
}
for _, opt := range legalMergeOptions {
if p.ValuesMerge == opt {
return nil
}
}
return fmt.Errorf("valuesMerge must be one of %v", legalMergeOptions)
}
func (p *HelmChartInflationGeneratorPlugin) absChartHome() string {
if filepath.IsAbs(p.ChartHome) {
return p.ChartHome
}
return filepath.Join(p.h.Loader().Root(), p.ChartHome)
}
func (p *HelmChartInflationGeneratorPlugin) runHelmCommand(
args []string) ([]byte, error) {
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
cmd := exec.Command(p.h.GeneralConfig().HelmConfig.Command, args...)
cmd.Stdout = stdout
cmd.Stderr = stderr
env := []string{
fmt.Sprintf("HELM_CONFIG_HOME=%s", p.ConfigHome),
fmt.Sprintf("HELM_CACHE_HOME=%s/.cache", p.ConfigHome),
fmt.Sprintf("HELM_DATA_HOME=%s/.data", p.ConfigHome)}
cmd.Env = append(os.Environ(), env...)
err := cmd.Run()
if err != nil {
helm := p.h.GeneralConfig().HelmConfig.Command
err = errors.Wrap(
fmt.Errorf(
"unable to run: '%s %s' with env=%s (is '%s' installed?)",
helm, strings.Join(args, " "), env, helm),
stderr.String(),
)
}
return stdout.Bytes(), err
}
// createNewMergedValuesFile replaces/merges original values file with ValuesInline.
func (p *HelmChartInflationGeneratorPlugin) createNewMergedValuesFile() (
path string, err error) {
if p.ValuesMerge == valuesMergeOptionMerge ||
p.ValuesMerge == valuesMergeOptionOverride {
if err = p.replaceValuesInline(); err != nil {
return "", err
}
}
var b []byte
b, err = yaml.Marshal(p.ValuesInline)
if err != nil { if err != nil {
return "", err return "", err
} }
return path, nil return p.writeValuesBytes(b)
}
func (p *HelmChartInflationGeneratorPlugin) replaceValuesInline() error {
pValues, err := p.h.Loader().Load(p.ValuesFile)
if err != nil {
return err
}
chValues := make(map[string]interface{})
if err = yaml.Unmarshal(pValues, &chValues); err != nil {
return err
}
switch p.ValuesMerge {
case valuesMergeOptionOverride:
err = mergo.Merge(
&chValues, p.ValuesInline, mergo.WithOverride)
case valuesMergeOptionMerge:
err = mergo.Merge(&chValues, p.ValuesInline)
}
p.ValuesInline = chValues
return err
}
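
A minimal standalone sketch (illustrative only, not part of this diff) of the difference between the "merge" and "override" options handled by replaceValuesInline above, with made-up maps standing in for the chart's values.yaml and the generator's valuesInline:

```go
// Illustrative only: chartValues stands in for the chart's values.yaml,
// inline stands in for the generator's valuesInline field.
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func shallowCopy(m map[string]interface{}) map[string]interface{} {
	out := map[string]interface{}{}
	for k, v := range m {
		out[k] = v
	}
	return out
}

func main() {
	chartValues := map[string]interface{}{
		"replicaCount": 1,
		"image":        map[string]interface{}{"tag": "1.0.0"},
	}
	inline := map[string]interface{}{"replicaCount": 3}

	// valuesMerge: merge -- existing chart values win on conflict.
	merged := shallowCopy(chartValues)
	if err := mergo.Merge(&merged, inline); err != nil {
		panic(err)
	}
	fmt.Println(merged["replicaCount"]) // 1

	// valuesMerge: override -- inline values win on conflict.
	overridden := shallowCopy(chartValues)
	if err := mergo.Merge(&overridden, inline, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Println(overridden["replicaCount"]) // 3
}
```

As far as the code shown here goes, the third option, "replace", skips this step entirely and writes only the inline values.
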
// copyValuesFile to avoid branching. TODO: get rid of this.
func (p *HelmChartInflationGeneratorPlugin) copyValuesFile() (string, error) {
b, err := p.h.Loader().Load(p.ValuesFile)
if err != nil {
return "", err
}
return p.writeValuesBytes(b)
}
// Write an absolute path file in the tmp file system.
func (p *HelmChartInflationGeneratorPlugin) writeValuesBytes(
b []byte) (string, error) {
if err := p.establishTmpDir(); err != nil {
return "", fmt.Errorf("cannot create tmp dir to write helm values")
}
path := filepath.Join(p.tmpDir, p.Name+"-kustomize-values.yaml")
return path, ioutil.WriteFile(path, b, 0644)
}
func (p *HelmChartInflationGeneratorPlugin) cleanup() {
if p.tmpDir != "" {
os.RemoveAll(p.tmpDir)
}
} }
// Generate implements generator // Generate implements generator
func (p *HelmChartInflationGeneratorPlugin) Generate() (resmap.ResMap, error) { func (p *HelmChartInflationGeneratorPlugin) Generate() (rm resmap.ResMap, err error) {
// cleanup defer p.cleanup()
defer os.RemoveAll(p.tmpDir) if err = p.checkHelmVersion(); err != nil {
// check helm version. we only support V3
err := p.checkHelmVersion()
if err != nil {
return nil, err return nil, err
} }
// pull the chart if path, exists := p.chartExistsLocally(); !exists {
if !p.checkLocalChart() { if p.Repo == "" {
_, err := p.runHelmCommand(p.getPullCommandArgs()) return nil, fmt.Errorf(
if err != nil { "no repo specified for pull, no chart found at '%s'", path)
}
if _, err := p.runHelmCommand(p.pullCommand()); err != nil {
return nil, err return nil, err
} }
} }
if len(p.ValuesInline) > 0 {
// inflator config valuesLocal p.ValuesFile, err = p.createNewMergedValuesFile()
if len(p.ValuesLocal) > 0 {
err := p.useValuesLocal()
if err != nil {
return nil, err
}
} else { } else {
err := p.copyValues() p.ValuesFile, err = p.copyValuesFile()
if err != nil {
return nil, err
}
} }
if err != nil {
// render the charts return nil, err
stdout, err := p.runHelmCommand(p.getTemplateCommandArgs()) }
var stdout []byte
stdout, err = p.runHelmCommand(p.templateCommand())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return p.h.ResmapFactory().NewResMapFromBytes(stdout) rm, err = p.h.ResmapFactory().NewResMapFromBytes(stdout)
if err == nil {
return rm, nil
}
// try to remove the contents before first "---" because
// helm may produce messages to stdout before it
stdoutStr := string(stdout)
if idx := strings.Index(stdoutStr, "---"); idx != -1 {
return p.h.ResmapFactory().NewResMapFromBytes([]byte(stdoutStr[idx:]))
}
return nil, err
} }
func (p *HelmChartInflationGeneratorPlugin) getTemplateCommandArgs() []string { func (p *HelmChartInflationGeneratorPlugin) templateCommand() []string {
args := []string{"template"} args := []string{"template"}
if p.ReleaseName != "" { if p.ReleaseName != "" {
args = append(args, p.ReleaseName) args = append(args, p.ReleaseName)
} }
args = append(args, filepath.Join(p.ChartHome, p.ChartName)) args = append(args, filepath.Join(p.absChartHome(), p.Name))
if p.ReleaseNamespace != "" { if p.ValuesFile != "" {
args = append(args, "--namespace", p.ReleaseNamespace) args = append(args, "--values", p.ValuesFile)
} }
if p.Values != "" { if p.ReleaseName == "" {
args = append(args, "--values", p.Values) // AFAICT, this doesn't work as intended due to a bug in helm.
// See https://github.com/helm/helm/issues/6019
// I've tried placing the flag before and after the name argument.
args = append(args, "--generate-name")
} }
args = append(args, p.ExtraArgs...)
return args return args
} }
func (p *HelmChartInflationGeneratorPlugin) getPullCommandArgs() []string { func (p *HelmChartInflationGeneratorPlugin) pullCommand() []string {
args := []string{"pull", "--untar", "--untardir", p.ChartHome} args := []string{
chartName := fmt.Sprintf("%s/%s", p.ChartRepoName, p.ChartName) "pull",
if p.ChartVersion != "" { "--untar",
args = append(args, "--version", p.ChartVersion) "--untardir", p.absChartHome(),
"--repo", p.Repo,
p.Name}
if p.Version != "" {
args = append(args, "--version", p.Version)
} }
if p.ChartRepoURL != "" {
args = append(args, "--repo", p.ChartRepoURL)
chartName = p.ChartName
}
args = append(args, chartName)
return args return args
} }
// checkLocalChart will return true if the chart does exist in // chartExistsLocally will return true if the chart does exist in
// local chart home. // local chart home.
func (p *HelmChartInflationGeneratorPlugin) checkLocalChart() bool { func (p *HelmChartInflationGeneratorPlugin) chartExistsLocally() (string, bool) {
path := filepath.Join(p.ChartHome, p.ChartName) path := filepath.Join(p.absChartHome(), p.Name)
s, err := os.Stat(path) s, err := os.Stat(path)
if err != nil { if err != nil {
return false return "", false
} }
return s.IsDir() return path, s.IsDir()
} }
// checkHelmVersion will return an error if the helm version is not V3 // checkHelmVersion will return an error if the helm version is not V3
@ -270,11 +310,17 @@ func (p *HelmChartInflationGeneratorPlugin) checkHelmVersion() error {
if err != nil { if err != nil {
return err return err
} }
r, err := regexp.Compile(`v\d+(\.\d+)+`) r, err := regexp.Compile(`v?\d+(\.\d+)+`)
if err != nil { if err != nil {
return err return err
} }
v := string(r.Find(stdout))[1:] v := r.FindString(string(stdout))
if v == "" {
return fmt.Errorf("cannot find version string in %s", string(stdout))
}
if v[0] == 'v' {
v = v[1:]
}
majorVersion := strings.Split(v, ".")[0] majorVersion := strings.Split(v, ".")[0]
if majorVersion != "3" { if majorVersion != "3" {
return fmt.Errorf("this plugin requires helm V3 but got v%s", v) return fmt.Errorf("this plugin requires helm V3 but got v%s", v)

View File

@ -30,20 +30,15 @@ func (p *NamespaceTransformerPlugin) Transform(m resmap.ResMap) error {
return nil return nil
} }
for _, r := range m.Resources() { for _, r := range m.Resources() {
empty, err := r.IsEmpty() if r.IsEmpty() {
if err != nil {
return err
}
if empty {
// Don't mutate empty objects? // Don't mutate empty objects?
continue continue
} }
r.StorePreviousId() r.StorePreviousId()
err = r.ApplyFilter(namespace.Filter{ if err := r.ApplyFilter(namespace.Filter{
Namespace: p.Namespace, Namespace: p.Namespace,
FsSlice: p.FieldSpecs, FsSlice: p.FieldSpecs,
}) }); err != nil {
if err != nil {
return err return err
} }
matches := m.GetMatchingResourcesByCurrentId(r.CurId().Equals) matches := m.GetMatchingResourcesByCurrentId(r.CurId().Equals)

View File

@ -28,45 +28,48 @@ func (p *PatchStrategicMergeTransformerPlugin) Config(
return fmt.Errorf("empty file path and empty patch content") return fmt.Errorf("empty file path and empty patch content")
} }
if len(p.Paths) != 0 { if len(p.Paths) != 0 {
for _, onePath := range p.Paths { patches, err := loadFromPaths(h, p.Paths)
// The following oddly attempts to interpret a path string as an
// actual patch (instead of as a path to a file containing a patch).
// All tests pass if this code is commented out. This code should
// be deleted; the user should use the Patches field which
// exists for this purpose (inline patch declaration).
res, err := h.ResmapFactory().RF().SliceFromBytes([]byte(onePath))
if err == nil {
p.loadedPatches = append(p.loadedPatches, res...)
continue
}
res, err = h.ResmapFactory().RF().SliceFromPatches(
h.Loader(), []types.PatchStrategicMerge{onePath})
if err != nil {
return err
}
p.loadedPatches = append(p.loadedPatches, res...)
}
}
if p.Patches != "" {
res, err := h.ResmapFactory().RF().SliceFromBytes([]byte(p.Patches))
if err != nil { if err != nil {
return err return err
} }
p.loadedPatches = append(p.loadedPatches, res...) p.loadedPatches = append(p.loadedPatches, patches...)
}
if p.Patches != "" {
patches, err := h.ResmapFactory().RF().SliceFromBytes([]byte(p.Patches))
if err != nil {
return err
}
p.loadedPatches = append(p.loadedPatches, patches...)
} }
if len(p.loadedPatches) == 0 { if len(p.loadedPatches) == 0 {
return fmt.Errorf( return fmt.Errorf(
"patch appears to be empty; files=%v, Patch=%s", p.Paths, p.Patches) "patch appears to be empty; files=%v, Patch=%s", p.Paths, p.Patches)
} }
// Merge the patches, looking for conflicts.
_, err = h.ResmapFactory().ConflatePatches(p.loadedPatches)
if err != nil {
return err
}
return nil return nil
} }
func loadFromPaths(
h *resmap.PluginHelpers,
paths []types.PatchStrategicMerge) (
result []*resource.Resource, err error) {
var patches []*resource.Resource
for _, path := range paths {
// For legacy reasons, attempt to treat the path string as
// actual patch content.
patches, err = h.ResmapFactory().RF().SliceFromBytes([]byte(path))
if err != nil {
// Failing that, treat it as a file path.
patches, err = h.ResmapFactory().RF().SliceFromPatches(
h.Loader(), []types.PatchStrategicMerge{path})
if err != nil {
return
}
}
result = append(result, patches...)
}
return
}
func (p *PatchStrategicMergeTransformerPlugin) Transform(m resmap.ResMap) error { func (p *PatchStrategicMergeTransformerPlugin) Transform(m resmap.ResMap) error {
for _, patch := range p.loadedPatches { for _, patch := range p.loadedPatches {
target, err := m.GetById(patch.OrgId()) target, err := m.GetById(patch.OrgId())
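
loadFromPaths above keeps a legacy quirk: each entry in paths is first tried as inline patch content and only then treated as a file path. A rough, standard-library-only sketch of that fallback shape; names are hypothetical and looksLikeInlinePatch is a crude stand-in for SliceFromBytes succeeding:

```go
// Hypothetical names throughout; not the real kustomize loader.
package main

import (
	"fmt"
	"os"
	"strings"
)

func looksLikeInlinePatch(s string) bool {
	// The real code parses the bytes; this sketch just checks for YAML-ish shape.
	return strings.Contains(s, ":") && strings.Contains(s, "\n")
}

// loadPatch returns the patch body, trying the string itself first and
// falling back to reading it as a file path.
func loadPatch(pathOrContent string) (string, error) {
	if looksLikeInlinePatch(pathOrContent) {
		return pathOrContent, nil
	}
	b, err := os.ReadFile(pathOrContent)
	if err != nil {
		return "", fmt.Errorf("neither inline patch nor readable file: %w", err)
	}
	return string(b), nil
}

func main() {
	inline := "kind: Deployment\nmetadata:\n  name: demo\n"
	body, _ := loadPatch(inline)
	fmt.Println(strings.HasPrefix(body, "kind:")) // true: used as inline content
}
```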

View File

@ -21,6 +21,7 @@ type PatchTransformerPlugin struct {
Path string `json:"path,omitempty" yaml:"path,omitempty"` Path string `json:"path,omitempty" yaml:"path,omitempty"`
Patch string `json:"patch,omitempty" yaml:"patch,omitempty"` Patch string `json:"patch,omitempty" yaml:"patch,omitempty"`
Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"` Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"`
Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"`
} }
func (p *PatchTransformerPlugin) Config( func (p *PatchTransformerPlugin) Config(
@ -60,6 +61,12 @@ func (p *PatchTransformerPlugin) Config(
} }
if errSM == nil { if errSM == nil {
p.loadedPatch = patchSM p.loadedPatch = patchSM
if p.Options["allowNameChange"] {
p.loadedPatch.SetAllowNameChange("true")
}
if p.Options["allowKindChange"] {
p.loadedPatch.SetAllowKindChange("true")
}
} else { } else {
p.decodedPatch = patchJson p.decodedPatch = patchJson
} }

View File

@ -1,7 +1,6 @@
package nameref package nameref
import ( import (
"encoding/json"
"fmt" "fmt"
"strings" "strings"
@ -11,7 +10,6 @@ import (
"sigs.k8s.io/kustomize/api/resmap" "sigs.k8s.io/kustomize/api/resmap"
"sigs.k8s.io/kustomize/api/resource" "sigs.k8s.io/kustomize/api/resource"
"sigs.k8s.io/kustomize/api/types" "sigs.k8s.io/kustomize/api/types"
"sigs.k8s.io/kustomize/kyaml/filtersutil"
"sigs.k8s.io/kustomize/kyaml/kio" "sigs.k8s.io/kustomize/kyaml/kio"
"sigs.k8s.io/kustomize/kyaml/yaml" "sigs.k8s.io/kustomize/kyaml/yaml"
) )
@ -186,11 +184,7 @@ func (f Filter) recordTheReferral(referral *resource.Resource) {
// getRoleRefGvk returns a Gvk in the roleRef field. Return error // getRoleRefGvk returns a Gvk in the roleRef field. Return error
// if the roleRef, roleRef/apiGroup or roleRef/kind is missing. // if the roleRef, roleRef/apiGroup or roleRef/kind is missing.
func getRoleRefGvk(res json.Marshaler) (*resid.Gvk, error) { func getRoleRefGvk(n *yaml.RNode) (*resid.Gvk, error) {
n, err := filtersutil.GetRNode(res)
if err != nil {
return nil, err
}
roleRef, err := n.Pipe(yaml.Lookup("roleRef")) roleRef, err := n.Pipe(yaml.Lookup("roleRef"))
if err != nil { if err != nil {
return nil, err return nil, err
@ -276,7 +270,7 @@ func (f Filter) roleRefFilter() sieveFunc {
if !strings.HasSuffix(f.NameFieldToUpdate.Path, "roleRef/name") { if !strings.HasSuffix(f.NameFieldToUpdate.Path, "roleRef/name") {
return acceptAll return acceptAll
} }
roleRefGvk, err := getRoleRefGvk(f.Referrer) roleRefGvk, err := getRoleRefGvk(f.Referrer.AsRNode())
if err != nil { if err != nil {
return acceptAll return acceptAll
} }

View File

@ -118,7 +118,6 @@ func (ns Filter) roleBindingHack(obj *yaml.RNode, meta yaml.ResourceMeta) error
// add the namespace to each "subject" with name: default // add the namespace to each "subject" with name: default
err = obj.VisitElements(func(o *yaml.RNode) error { err = obj.VisitElements(func(o *yaml.RNode) error {
// copied from kunstruct based kustomize NamespaceTransformer plugin
// The only case we need to force the namespace // The only case we need to force the namespace
// is for the "service account". "default" is // is for the "service account". "default" is
// kind of hardcoded here for right now. // kind of hardcoded here for right now.

View File

@ -4,7 +4,6 @@
package patchstrategicmerge package patchstrategicmerge
import ( import (
"sigs.k8s.io/kustomize/api/konfig"
"sigs.k8s.io/kustomize/kyaml/kio" "sigs.k8s.io/kustomize/kyaml/kio"
"sigs.k8s.io/kustomize/kyaml/yaml" "sigs.k8s.io/kustomize/kyaml/yaml"
"sigs.k8s.io/kustomize/kyaml/yaml/merge2" "sigs.k8s.io/kustomize/kyaml/yaml/merge2"
@ -29,7 +28,7 @@ func (pf Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if !konfig.FlagEnableKyamlDefaultValue || r != nil { if r != nil {
result = append(result, r) result = append(result, r)
} }
} }

View File

@ -20,12 +20,12 @@ func SortArrayAndComputeHash(s []string) (string, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
return Encode(Hash(string(data))) return encode(hex256(string(data)))
} }
// Copied from https://github.com/kubernetes/kubernetes // Copied from https://github.com/kubernetes/kubernetes
// /blob/master/pkg/kubectl/util/hash/hash.go // /blob/master/pkg/kubectl/util/hash/hash.go
func Encode(hex string) (string, error) { func encode(hex string) (string, error) {
if len(hex) < 10 { if len(hex) < 10 {
return "", fmt.Errorf( return "", fmt.Errorf(
"input length must be at least 10") "input length must be at least 10")
@ -48,23 +48,18 @@ func Encode(hex string) (string, error) {
return string(enc), nil return string(enc), nil
} }
// Hash returns the hex form of the sha256 of the argument. // hex256 returns the hex form of the sha256 of the argument.
func Hash(data string) string { func hex256(data string) string {
return fmt.Sprintf("%x", sha256.Sum256([]byte(data))) return fmt.Sprintf("%x", sha256.Sum256([]byte(data)))
} }
// HashRNode returns the hash value of input RNode // Hasher computes the hash of an RNode.
func HashRNode(node *yaml.RNode) (string, error) { type Hasher struct{}
// get node kind
kindNode, err := node.Pipe(yaml.FieldMatcher{Name: "kind"})
if err != nil {
return "", err
}
kind := kindNode.YNode().Value
// calculate hash for different kinds // Hash returns a hash of the argument.
encoded := "" func (h *Hasher) Hash(node *yaml.RNode) (r string, err error) {
switch kind { var encoded string
switch node.GetKind() {
case "ConfigMap": case "ConfigMap":
encoded, err = encodeConfigMap(node) encoded, err = encodeConfigMap(node)
case "Secret": case "Secret":
@ -77,10 +72,11 @@ func HashRNode(node *yaml.RNode) (string, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
return Encode(Hash(encoded)) return encode(hex256(encoded))
} }
func getNodeValues(node *yaml.RNode, paths []string) (map[string]interface{}, error) { func getNodeValues(
node *yaml.RNode, paths []string) (map[string]interface{}, error) {
values := make(map[string]interface{}) values := make(map[string]interface{})
for _, p := range paths { for _, p := range paths {
vn, err := node.Pipe(yaml.Lookup(p)) vn, err := node.Pipe(yaml.Lookup(p))
@ -117,8 +113,11 @@ func encodeConfigMap(node *yaml.RNode) (string, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
m := map[string]interface{}{"kind": "ConfigMap", "name": values["metadata/name"], m := map[string]interface{}{
"data": values["data"]} "kind": "ConfigMap",
"name": values["metadata/name"],
"data": values["data"],
}
if _, ok := values["binaryData"].(map[string]interface{}); ok { if _, ok := values["binaryData"].(map[string]interface{}); ok {
m["binaryData"] = values["binaryData"] m["binaryData"] = values["binaryData"]
} }
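
A standalone sketch of the hex256 step used above: the canonical encoding of the object is hashed with sha256 and the hex digest is what encode() later shortens into the familiar name suffix. The sample input is invented, and the real encode() step (not fully shown in this hunk) further normalizes the digest:

```go
// Standard library only; illustrative input.
package main

import (
	"crypto/sha256"
	"fmt"
)

func hex256(data string) string {
	return fmt.Sprintf("%x", sha256.Sum256([]byte(data)))
}

func main() {
	encoded := `{"kind":"ConfigMap","name":"demo","data":{"k":"v"}}`
	digest := hex256(encoded)
	fmt.Println(len(digest)) // 64 hex characters
	fmt.Println(digest[:10]) // encode() works from a short prefix like this
}
```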

View File

@ -5,8 +5,8 @@
package ifc package ifc
import ( import (
"sigs.k8s.io/kustomize/api/resid"
"sigs.k8s.io/kustomize/api/types" "sigs.k8s.io/kustomize/api/types"
"sigs.k8s.io/kustomize/kyaml/yaml"
) )
// Validator provides functions to validate annotations and labels // Validator provides functions to validate annotations and labels
@ -38,92 +38,10 @@ type Loader interface {
Cleanup() error Cleanup() error
} }
// Kunstructured represents a Kubernetes Resource Model object. // KustHasher returns a hash of the argument
type Kunstructured interface {
// Several uses.
Copy() Kunstructured
// GetAnnotations returns the k8s annotations.
GetAnnotations() map[string]string
// GetData returns a top-level "data" field, as in a ConfigMap.
GetDataMap() map[string]string
// GetData returns a top-level "binaryData" field, as in a ConfigMap.
GetBinaryDataMap() map[string]string
// Used by ResAccumulator and ReplacementTransformer.
GetFieldValue(string) (interface{}, error)
// Used by Resource.OrgId
GetGvk() resid.Gvk
// Used by resource.Factory.SliceFromBytes
GetKind() string
// GetLabels returns the k8s labels.
GetLabels() map[string]string
// Used by Resource.CurId and resource factory.
GetName() string
// Used by special case code in
// ResMap.SubsetThatCouldBeReferencedByResource
GetSlice(path string) ([]interface{}, error)
// GetString returns the value of a string field.
// Used by Resource.GetNamespace
GetString(string) (string, error)
// Several uses.
Map() (map[string]interface{}, error)
// Used by Resource.AsYAML and Resource.String
MarshalJSON() ([]byte, error)
// Used by resWrangler.Select
MatchesAnnotationSelector(selector string) (bool, error)
// Used by resWrangler.Select
MatchesLabelSelector(selector string) (bool, error)
// SetAnnotations replaces the k8s annotations.
SetAnnotations(map[string]string)
// SetDataMap sets a top-level "data" field, as in a ConfigMap.
SetDataMap(map[string]string)
// SetDataMap sets a top-level "binaryData" field, as in a ConfigMap.
SetBinaryDataMap(map[string]string)
// Used by PatchStrategicMergeTransformer.
SetGvk(resid.Gvk)
// SetLabels replaces the k8s labels.
SetLabels(map[string]string)
// SetName changes the name.
SetName(string)
// SetNamespace changes the namespace.
SetNamespace(string)
// Needed, for now, by kyaml/filtersutil.ApplyToJSON.
UnmarshalJSON([]byte) error
}
// KunstructuredFactory makes instances of Kunstructured.
type KunstructuredFactory interface {
SliceFromBytes([]byte) ([]Kunstructured, error)
FromMap(m map[string]interface{}) Kunstructured
Hasher() KunstructuredHasher
MakeConfigMap(kvLdr KvLoader, args *types.ConfigMapArgs) (Kunstructured, error)
MakeSecret(kvLdr KvLoader, args *types.SecretArgs) (Kunstructured, error)
}
// KunstructuredHasher returns a hash of the argument
// or an error. // or an error.
type KunstructuredHasher interface { type KustHasher interface {
Hash(Kunstructured) (string, error) Hash(*yaml.RNode) (string, error)
} }
// See core.v1.SecretTypeOpaque // See core.v1.SecretTypeOpaque
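
The new KustHasher contract above is deliberately tiny compared with the removed Kunstructured interface: any type with a Hash(*yaml.RNode) method satisfies it. A minimal sketch, assuming the kustomize api and kyaml modules are importable (as they are in this vendor tree); constHasher is a made-up stub:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/api/ifc"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

type constHasher struct{}

// Hash ignores the node and returns a fixed fake suffix.
func (constHasher) Hash(_ *yaml.RNode) (string, error) {
	return "0123456789", nil
}

// Compile-time check that constHasher satisfies the new, much smaller interface.
var _ ifc.KustHasher = constHasher{}

func main() {
	suffix, _ := constHasher{}.Hash(nil)
	fmt.Println(suffix)
}
```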

View File

@ -25,7 +25,7 @@ type OpenAPIDefinition struct {
Dependencies []string Dependencies []string
} }
type myProperties map[string]spec.Schema type myProperties = map[string]spec.Schema
type nameToApiMap map[string]OpenAPIDefinition type nameToApiMap map[string]OpenAPIDefinition
// LoadConfigFromCRDs parses CRD schemas from paths into a TransformerConfig // LoadConfigFromCRDs parses CRD schemas from paths into a TransformerConfig
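
The functional change in this hunk is the "=" on myProperties, which turns a defined type into a type alias for map[string]spec.Schema, i.e. the very same type rather than a new distinct one. A standalone illustration with made-up types:

```go
// Made-up types, standard library only.
package main

import "fmt"

type definedProps map[string]int // defined type: a new, distinct type
type aliasProps = map[string]int // alias: the same type as map[string]int

func main() {
	var d []definedProps
	var a []aliasProps

	// []aliasProps and []map[string]int are identical types, so this assigns directly.
	var plain []map[string]int = a
	fmt.Println(plain == nil) // true

	// The equivalent assignment from d would not compile, because
	// []definedProps is a distinct type:
	//   var bad []map[string]int = d
	fmt.Println(len(d)) // 0
}
```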

View File

@ -1,23 +0,0 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package conflict
import (
"sigs.k8s.io/kustomize/api/resid"
"sigs.k8s.io/kustomize/api/resource"
)
type cdFactory struct{}
var _ resource.ConflictDetectorFactory = &cdFactory{}
// NewFactory returns a new conflict detector factory.
func NewFactory() resource.ConflictDetectorFactory {
return &cdFactory{}
}
// New returns an instance of smPatchMergeOnlyDetector.
func (c cdFactory) New(_ resid.Gvk) (resource.ConflictDetector, error) {
return &smPatchMergeOnlyDetector{}, nil
}

View File

@ -1,33 +0,0 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package conflict
import (
"sigs.k8s.io/kustomize/api/resource"
)
// smPatchMergeOnlyDetector ignores conflicts,
// but does real strategic merge patching.
// This is part of an effort to eliminate dependence on
// apimachinery package to allow kustomize integration
// into kubectl (#2506 and #1500)
type smPatchMergeOnlyDetector struct{}
var _ resource.ConflictDetector = &smPatchMergeOnlyDetector{}
func (c *smPatchMergeOnlyDetector) HasConflict(
_, _ *resource.Resource) (bool, error) {
return false, nil
}
// There's at least one case that doesn't work. Suppose one has a
// Deployment with a volume with the bizarre "emptyDir: {}" entry.
// If you want to get rid of this entry via a patch containing
// the entry "emptyDir: null", then the following won't work,
// because null entries are eliminated.
func (c *smPatchMergeOnlyDetector) MergePatches(
r, patch *resource.Resource) (*resource.Resource, error) {
err := r.ApplySmPatch(patch)
return r, err
}

View File

@ -13,6 +13,7 @@ import (
"strings" "strings"
"github.com/pkg/errors" "github.com/pkg/errors"
"sigs.k8s.io/kustomize/api/filesys"
"sigs.k8s.io/kustomize/api/ifc" "sigs.k8s.io/kustomize/api/ifc"
"sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers" "sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers"
"sigs.k8s.io/kustomize/api/internal/plugins/execplugin" "sigs.k8s.io/kustomize/api/internal/plugins/execplugin"
@ -29,11 +30,21 @@ import (
type Loader struct { type Loader struct {
pc *types.PluginConfig pc *types.PluginConfig
rf *resmap.Factory rf *resmap.Factory
fs filesys.FileSystem
// absolutePluginHome caches the location of a valid plugin root directory.
// It should only be set once the directory's existence has been confirmed.
absolutePluginHome string
} }
func NewLoader( func NewLoader(
pc *types.PluginConfig, rf *resmap.Factory) *Loader { pc *types.PluginConfig, rf *resmap.Factory, fs filesys.FileSystem) *Loader {
return &Loader{pc: pc, rf: rf} return &Loader{pc: pc, rf: rf, fs: fs}
}
// Config provides the global (not plugin specific) PluginConfig data.
func (l *Loader) Config() *types.PluginConfig {
return l.pc
} }
func (l *Loader) LoadGenerators( func (l *Loader) LoadGenerators(
@ -95,13 +106,47 @@ func relativePluginPath(id resid.ResId) string {
strings.ToLower(id.Kind)) strings.ToLower(id.Kind))
} }
func AbsolutePluginPath(pc *types.PluginConfig, id resid.ResId) string { func (l *Loader) AbsolutePluginPath(id resid.ResId) (string, error) {
return filepath.Join( pluginHome, err := l.absPluginHome()
pc.AbsPluginHome, relativePluginPath(id), id.Kind) if err != nil {
return "", err
}
return filepath.Join(pluginHome, relativePluginPath(id), id.Kind), nil
} }
func (l *Loader) absolutePluginPath(id resid.ResId) string { // absPluginHome is the home of kustomize Exec and Go plugins.
return AbsolutePluginPath(l.pc, id) // Kustomize plugin configuration files are k8s-style objects
// containing the fields 'apiVersion' and 'kind', e.g.
// apiVersion: apps/v1
// kind: Deployment
// kustomize reads plugin configuration data from a file path
// specified in the 'generators:' or 'transformers:' field of a
// kustomization file. For Exec and Go plugins, kustomize
// uses this data to both locate the plugin and configure it.
// Each Exec or Go plugin (its code, its tests, its supporting data
// files, etc.) must be housed in its own directory at
// ${absPluginHome}/${pluginApiVersion}/LOWERCASE(${pluginKind})
// where
// - ${absPluginHome} is an absolute path, defined below.
// - ${pluginApiVersion} is taken from the plugin config file.
// - ${pluginKind} is taken from the plugin config file.
func (l *Loader) absPluginHome() (string, error) {
// External plugins are disabled--return the dummy plugin root.
if l.pc.PluginRestrictions != types.PluginRestrictionsNone {
return konfig.NoPluginHomeSentinal, nil
}
// We've already determined plugin home--use the cached value.
if l.absolutePluginHome != "" {
return l.absolutePluginHome, nil
}
// Check default locations for a valid plugin root, and cache it if found.
dir, err := konfig.DefaultAbsPluginHome(l.fs)
if err != nil {
return "", err
}
l.absolutePluginHome = dir
return l.absolutePluginHome, nil
} }
func isBuiltinPlugin(res *resource.Resource) bool { func isBuiltinPlugin(res *resource.Resource) bool {
@ -148,7 +193,7 @@ func (l *Loader) loadAndConfigurePlugin(
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "marshalling yaml from res %s", res.OrgId()) return nil, errors.Wrapf(err, "marshalling yaml from res %s", res.OrgId())
} }
err = c.Config(resmap.NewPluginHelpers(ldr, v, l.rf), yaml) err = c.Config(resmap.NewPluginHelpers(ldr, v, l.rf, l.pc), yaml)
if err != nil { if err != nil {
return nil, errors.Wrapf( return nil, errors.Wrapf(
err, "plugin %s fails configuration", res.OrgId()) err, "plugin %s fails configuration", res.OrgId())
@ -176,10 +221,13 @@ func (l *Loader) loadPlugin(res *resource.Resource) (resmap.Configurable, error)
} }
func (l *Loader) loadExecOrGoPlugin(resId resid.ResId) (resmap.Configurable, error) { func (l *Loader) loadExecOrGoPlugin(resId resid.ResId) (resmap.Configurable, error) {
absPluginPath, err := l.AbsolutePluginPath(resId)
if err != nil {
return nil, err
}
// First try to load the plugin as an executable. // First try to load the plugin as an executable.
p := execplugin.NewExecPlugin(l.absolutePluginPath(resId)) p := execplugin.NewExecPlugin(absPluginPath)
err := p.ErrIfNotExecutable() if err = p.ErrIfNotExecutable(); err == nil {
if err == nil {
return p, nil return p, nil
} }
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
@ -193,7 +241,7 @@ func (l *Loader) loadExecOrGoPlugin(resId resid.ResId) (resmap.Configurable, err
return nil, err return nil, err
} }
// Failing the above, try loading it as a Go plugin. // Failing the above, try loading it as a Go plugin.
c, err := l.loadGoPlugin(resId) c, err := l.loadGoPlugin(resId, absPluginPath+".so")
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -208,12 +256,11 @@ func (l *Loader) loadExecOrGoPlugin(resId resid.ResId) (resmap.Configurable, err
// as a Loader instance variable. So make it a package variable. // as a Loader instance variable. So make it a package variable.
var registry = make(map[string]resmap.Configurable) var registry = make(map[string]resmap.Configurable)
func (l *Loader) loadGoPlugin(id resid.ResId) (resmap.Configurable, error) { func (l *Loader) loadGoPlugin(id resid.ResId, absPath string) (resmap.Configurable, error) {
regId := relativePluginPath(id) regId := relativePluginPath(id)
if c, ok := registry[regId]; ok { if c, ok := registry[regId]; ok {
return copyPlugin(c), nil return copyPlugin(c), nil
} }
absPath := l.absolutePluginPath(id) + ".so"
if !utils.FileExists(absPath) { if !utils.FileExists(absPath) {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"expected file with Go object code at: %s", absPath) "expected file with Go object code at: %s", absPath)

View File

@ -34,7 +34,7 @@ func GoBin() string {
// has her ${g}/${v}/$lower(${k})/${k}.go files. // has her ${g}/${v}/$lower(${k})/${k}.go files.
func DeterminePluginSrcRoot(fSys filesys.FileSystem) (string, error) { func DeterminePluginSrcRoot(fSys filesys.FileSystem) (string, error) {
return konfig.FirstDirThatExistsElseError( return konfig.FirstDirThatExistsElseError(
"source directory", fSys, []konfig.NotedFunc{ "plugin src root", fSys, []konfig.NotedFunc{
{ {
Note: "relative to unit test", Note: "relative to unit test",
F: func() string { F: func() string {
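
A hypothetical, standard-library-only sketch of the "noted candidates" lookup shape behind konfig.FirstDirThatExistsElseError used above: each candidate directory carries a note, the first one that exists wins, and the notes end up in the error message when none do. The candidate directories here are invented:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

type notedFunc struct {
	Note string
	F    func() string
}

// firstDirThatExists returns the first candidate directory that exists,
// or an error listing every candidate it tried together with its note.
func firstDirThatExists(what string, candidates []notedFunc) (string, error) {
	var tried []string
	for _, c := range candidates {
		dir := c.F()
		if info, err := os.Stat(dir); err == nil && info.IsDir() {
			return dir, nil
		}
		tried = append(tried, fmt.Sprintf("('%s'; %s)", dir, c.Note))
	}
	return "", fmt.Errorf("unable to find %s: tried %s", what, strings.Join(tried, ", "))
}

func main() {
	dir, err := firstDirThatExists("plugin src root", []notedFunc{
		{Note: "relative to unit test", F: func() string { return "../../plugin" }},
		{Note: "current working directory", F: func() string { d, _ := os.Getwd(); return d }},
	})
	fmt.Println(dir, err)
}
```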

View File

@ -443,7 +443,10 @@ func (kt *KustTarget) configureBuiltinPlugin(
err, "builtin %s marshal", bpt) err, "builtin %s marshal", bpt)
} }
} }
err = p.Config(resmap.NewPluginHelpers(kt.ldr, kt.validator, kt.rFactory), y) err = p.Config(
resmap.NewPluginHelpers(
kt.ldr, kt.validator, kt.rFactory, kt.pLdr.Config()),
y)
if err != nil { if err != nil {
return errors.Wrapf( return errors.Wrapf(
err, "trouble configuring builtin %s with config: `\n%s`", bpt, string(y)) err, "trouble configuring builtin %s with config: `\n%s`", bpt, string(y))

View File

@ -112,16 +112,22 @@ var generatorConfigurators = map[builtinhelpers.BuiltinPluginType]func(
return return
}, },
builtinhelpers.HelmChartInflationGenerator: func(kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f gFactory) ( builtinhelpers.HelmChartInflationGenerator: func(
kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f gFactory) (
result []resmap.Generator, err error) { result []resmap.Generator, err error) {
var c struct { var c struct {
types.HelmChartArgs types.HelmGlobals
types.HelmChart
} }
for _, args := range kt.kustomization.HelmChartInflationGenerator { var globals types.HelmGlobals
c.HelmChartArgs = args if kt.kustomization.HelmGlobals != nil {
globals = *kt.kustomization.HelmGlobals
}
for _, chart := range kt.kustomization.HelmCharts {
c.HelmGlobals = globals
c.HelmChart = chart
p := f() p := f()
err := kt.configureBuiltinPlugin(p, c, bpt) if err = kt.configureBuiltinPlugin(p, c, bpt); err != nil {
if err != nil {
return nil, err return nil, err
} }
result = append(result, p) result = append(result, p)
@ -201,14 +207,16 @@ var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func(
return return
} }
var c struct { var c struct {
Path string `json:"path,omitempty" yaml:"path,omitempty"` Path string `json:"path,omitempty" yaml:"path,omitempty"`
Patch string `json:"patch,omitempty" yaml:"patch,omitempty"` Patch string `json:"patch,omitempty" yaml:"patch,omitempty"`
Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"` Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"`
Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"`
} }
for _, pc := range kt.kustomization.Patches { for _, pc := range kt.kustomization.Patches {
c.Target = pc.Target c.Target = pc.Target
c.Patch = pc.Patch c.Patch = pc.Patch
c.Path = pc.Path c.Path = pc.Path
c.Options = pc.Options
p := f() p := f()
err = kt.configureBuiltinPlugin(p, c, bpt) err = kt.configureBuiltinPlugin(p, c, bpt)
if err != nil { if err != nil {
@ -221,6 +229,31 @@ var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func(
builtinhelpers.LabelTransformer: func( builtinhelpers.LabelTransformer: func(
kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) ( kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) (
result []resmap.Transformer, err error) { result []resmap.Transformer, err error) {
for _, label := range kt.kustomization.Labels {
var c struct {
Labels map[string]string
FieldSpecs []types.FieldSpec
}
c.Labels = label.Pairs
fss := types.FsSlice(label.FieldSpecs)
// merge the custom fieldSpecs with the default
if label.IncludeSelectors {
fss, err = fss.MergeAll(tc.CommonLabels)
} else {
// only add to metadata by default
fss, err = fss.MergeOne(types.FieldSpec{Path: "metadata/labels", CreateIfNotPresent: true})
}
if err != nil {
return nil, err
}
c.FieldSpecs = fss
p := f()
err = kt.configureBuiltinPlugin(p, c, bpt)
if err != nil {
return nil, err
}
result = append(result, p)
}
var c struct {
Labels map[string]string
FieldSpecs []types.FieldSpec
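The fieldSpec selection in the new Labels loop can be read as: custom fieldSpecs always apply, metadata/labels is always added, and the commonLabels defaults (selector paths and the like) are merged in only when includeSelectors is set. A small sketch of the two merge paths, not part of this changeset; the fieldSpec paths used here are illustrative stand-ins, not the actual commonLabels defaults:

    package main

    import (
        "fmt"

        "sigs.k8s.io/kustomize/api/types"
    )

    func main() {
        // Custom fieldSpecs that a labels entry might carry (illustrative path).
        custom := types.FsSlice{
            {Path: "spec/template/metadata/labels", CreateIfNotPresent: true},
        }

        // includeSelectors: false -- only the default metadata/labels path is added.
        metadataOnly, err := custom.MergeOne(
            types.FieldSpec{Path: "metadata/labels", CreateIfNotPresent: true})
        if err != nil {
            panic(err)
        }

        // includeSelectors: true -- the commonLabels defaults are merged in via
        // MergeAll; this slice is an illustrative stand-in for tc.CommonLabels.
        selectorDefaults := types.FsSlice{
            {Path: "spec/selector/matchLabels", CreateIfNotPresent: false},
        }
        withSelectors, err := custom.MergeAll(selectorDefaults)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(metadataOnly), len(withSelectors))
    }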


@ -4,15 +4,12 @@
package target
import (
"fmt"
"sigs.k8s.io/kustomize/api/resmap"
)
// multiTransformer contains a list of transformers.
type multiTransformer struct {
transformers []resmap.Transformer
checkConflictEnabled bool
}
var _ resmap.Transformer = &multiTransformer{}
@ -20,8 +17,8 @@ var _ resmap.Transformer = &multiTransformer{}
// newMultiTransformer constructs a multiTransformer.
func newMultiTransformer(t []resmap.Transformer) resmap.Transformer {
r := &multiTransformer{
transformers: make([]resmap.Transformer, len(t)),
checkConflictEnabled: false}
}
copy(r.transformers, t)
return r
}
@ -29,57 +26,11 @@ func newMultiTransformer(t []resmap.Transformer) resmap.Transformer {
// Transform applies the member transformers in order to the resources,
// optionally detecting and erroring on commutation conflict.
func (o *multiTransformer) Transform(m resmap.ResMap) error {
if o.checkConflictEnabled {
return o.transformWithCheckConflict(m)
}
return o.transform(m)
}
func (o *multiTransformer) transform(m resmap.ResMap) error {
for _, t := range o.transformers {
err := t.Transform(m)
if err != nil {
if err := t.Transform(m); err != nil {
return err
}
}
m.DropEmpties()
for _, r := range m.Resources() {
empty, err := r.IsEmpty()
if err != nil {
return err
}
if empty {
err := m.Remove(r.CurId())
if err != nil {
return err
}
}
}
return nil
}
// Of the len(o.transformers)! possible transformer orderings, compare to a reversed order.
// A spot check to perform when the transformations are supposed to be commutative.
// Fail if there's a difference in the result.
func (o *multiTransformer) transformWithCheckConflict(m resmap.ResMap) error {
mcopy := m.DeepCopy()
err := o.transform(m)
if err != nil {
return err
}
o.reverseTransformers()
err = o.transform(mcopy)
if err != nil {
return err
}
err = m.ErrorIfNotEqualSets(mcopy)
if err != nil {
return fmt.Errorf("found conflict between different patches\n%v", err)
}
return nil
}
func (o *multiTransformer) reverseTransformers() {
for i, j := 0, len(o.transformers)-1; i < j; i, j = i+1, j-1 {
o.transformers[i], o.transformers[j] = o.transformers[j], o.transformers[i]
}
}

View File

@ -1,108 +0,0 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package wrappy
import (
"fmt"
"sigs.k8s.io/kustomize/api/hasher"
"sigs.k8s.io/kustomize/api/ifc"
"sigs.k8s.io/kustomize/api/internal/generators"
"sigs.k8s.io/kustomize/api/konfig"
"sigs.k8s.io/kustomize/api/types"
"sigs.k8s.io/kustomize/kyaml/filtersutil"
"sigs.k8s.io/kustomize/kyaml/kio"
"sigs.k8s.io/kustomize/kyaml/yaml"
)
// WNodeFactory makes instances of WNode.
//
// These instances in turn adapt
// sigs.k8s.io/kustomize/kyaml/yaml.RNode
// to implement ifc.Unstructured.
// This factory is meant to implement ifc.KunstructuredFactory.
//
// This implementation should be thin, as both WNode and WNodeFactory must be
// factored away (deleted) along with ifc.Kunstructured in favor of direct use
// of RNode methods upon completion of
// https://github.com/kubernetes-sigs/kustomize/issues/2506.
//
// See also api/krusty/internal/provider/depprovider.go
type WNodeFactory struct {
}
var _ ifc.KunstructuredFactory = (*WNodeFactory)(nil)
func (k *WNodeFactory) SliceFromBytes(bs []byte) ([]ifc.Kunstructured, error) {
yamlRNodes, err := kio.FromBytes(bs)
if err != nil {
return nil, err
}
var result []ifc.Kunstructured
for i := range yamlRNodes {
rn := yamlRNodes[i]
meta, err := rn.GetValidatedMetadata()
if err != nil {
return nil, err
}
if !shouldDropObject(meta) {
if foundNil, path := rn.HasNilEntryInList(); foundNil {
return nil, fmt.Errorf("empty item at %v in object %v", path, rn)
}
result = append(result, FromRNode(rn))
}
}
return result, nil
}
// shouldDropObject returns true if the resource should not be accumulated.
func shouldDropObject(m yaml.ResourceMeta) bool {
_, y := m.ObjectMeta.Annotations[konfig.IgnoredByKustomizeAnnotation]
return y
}
func (k *WNodeFactory) FromMap(m map[string]interface{}) ifc.Kunstructured {
rn, err := FromMap(m)
if err != nil {
// TODO(#WNodeFactory): handle or bubble error"
panic(err)
}
return rn
}
// kustHash computes a hash of an unstructured object.
type kustHash struct{}
// Hash returns a hash of the given object
func (h *kustHash) Hash(m ifc.Kunstructured) (string, error) {
node, err := filtersutil.GetRNode(m)
if err != nil {
return "", err
}
return hasher.HashRNode(node)
}
func (k *WNodeFactory) Hasher() ifc.KunstructuredHasher {
return &kustHash{}
}
// MakeConfigMap makes a wrapped configmap.
func (k *WNodeFactory) MakeConfigMap(
ldr ifc.KvLoader, args *types.ConfigMapArgs) (ifc.Kunstructured, error) {
rn, err := generators.MakeConfigMap(ldr, args)
if err != nil {
return nil, err
}
return FromRNode(rn), nil
}
// MakeSecret makes a wrapped secret.
func (k *WNodeFactory) MakeSecret(
ldr ifc.KvLoader, args *types.SecretArgs) (ifc.Kunstructured, error) {
rn, err := generators.MakeSecret(ldr, args)
if err != nil {
return nil, err
}
return FromRNode(rn), nil
}

View File

@ -1,292 +0,0 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package wrappy
import (
"fmt"
"log"
"regexp"
"strconv"
"strings"
"sigs.k8s.io/kustomize/api/ifc"
"sigs.k8s.io/kustomize/api/resid"
"sigs.k8s.io/kustomize/kyaml/yaml"
)
// WNode implements ifc.Kunstructured using yaml.RNode.
//
// It exists only to help manage a switch from
// kunstruct.UnstructAdapter to yaml.RNode as the core
// representation of KRM objects in kustomize.
//
// It's got a silly name because we don't want it around for long,
// and want its use to be obvious.
type WNode struct {
node *yaml.RNode
}
var _ ifc.Kunstructured = (*WNode)(nil)
func NewWNode() *WNode {
return FromRNode(yaml.NewRNode(nil))
}
func FromMap(m map[string]interface{}) (*WNode, error) {
n, err := yaml.FromMap(m)
if err != nil {
return nil, err
}
return FromRNode(n), nil
}
func FromRNode(node *yaml.RNode) *WNode {
return &WNode{node: node}
}
func (wn *WNode) AsRNode() *yaml.RNode {
return wn.node
}
func (wn *WNode) demandMetaData(label string) yaml.ResourceMeta {
meta, err := wn.node.GetMeta()
if err != nil {
// Log and die since interface doesn't allow error.
log.Fatalf("for %s', expected valid resource: %v", label, err)
}
return meta
}
// Copy implements ifc.Kunstructured.
func (wn *WNode) Copy() ifc.Kunstructured {
return &WNode{node: wn.node.Copy()}
}
// GetAnnotations implements ifc.Kunstructured.
func (wn *WNode) GetAnnotations() map[string]string {
return wn.demandMetaData("GetAnnotations").Annotations
}
// convertSliceIndex traverses the items in `fields` and find
// if there is a slice index in the item and change it to a
// valid Lookup field path. For example, 'ports[0]' will be
// converted to 'ports' and '0'.
func convertSliceIndex(fields []string) []string {
var res []string
for _, s := range fields {
if !strings.HasSuffix(s, "]") {
res = append(res, s)
continue
}
re := regexp.MustCompile(`^(.*)\[(\d+)\]$`)
groups := re.FindStringSubmatch(s)
if len(groups) == 0 {
// no match, add to result
res = append(res, s)
continue
}
if groups[1] != "" {
res = append(res, groups[1])
}
res = append(res, groups[2])
}
return res
}
// GetFieldValue implements ifc.Kunstructured.
func (wn *WNode) GetFieldValue(path string) (interface{}, error) {
fields := convertSliceIndex(strings.Split(path, "."))
rn, err := wn.node.Pipe(yaml.Lookup(fields...))
if err != nil {
return nil, err
}
if rn == nil {
return nil, NoFieldError{path}
}
yn := rn.YNode()
// If this is an alias node, resolve it
if yn.Kind == yaml.AliasNode {
yn = yn.Alias
}
// Return value as map for DocumentNode and MappingNode kinds
if yn.Kind == yaml.DocumentNode || yn.Kind == yaml.MappingNode {
var result map[string]interface{}
if err := yn.Decode(&result); err != nil {
return nil, err
}
return result, err
}
// Return value as slice for SequenceNode kind
if yn.Kind == yaml.SequenceNode {
var result []interface{}
if err := yn.Decode(&result); err != nil {
return nil, err
}
return result, nil
}
if yn.Kind != yaml.ScalarNode {
return nil, fmt.Errorf("expected ScalarNode, got Kind=%d", yn.Kind)
}
// TODO: When doing kustomize var replacement, which is likely a
// a primary use of this function and the reason it returns interface{}
// rather than string, we do conversion from Nodes to Go types and back
// to nodes. We should figure out how to do replacement using raw nodes,
// assuming we keep the var feature in kustomize.
// The other end of this is: refvar.go:updateNodeValue.
switch yn.Tag {
case yaml.NodeTagString:
return yn.Value, nil
case yaml.NodeTagInt:
return strconv.Atoi(yn.Value)
case yaml.NodeTagFloat:
return strconv.ParseFloat(yn.Value, 64)
case yaml.NodeTagBool:
return strconv.ParseBool(yn.Value)
default:
// Possibly this should be an error or log.
return yn.Value, nil
}
}
// GetGvk implements ifc.Kunstructured.
func (wn *WNode) GetGvk() resid.Gvk {
meta := wn.demandMetaData("GetGvk")
g, v := resid.ParseGroupVersion(meta.APIVersion)
return resid.Gvk{Group: g, Version: v, Kind: meta.Kind}
}
// GetDataMap implements ifc.Kunstructured.
func (wn *WNode) GetDataMap() map[string]string {
return wn.node.GetDataMap()
}
// SetDataMap implements ifc.Kunstructured.
func (wn *WNode) SetDataMap(m map[string]string) {
wn.node.SetDataMap(m)
}
// GetBinaryDataMap implements ifc.Kunstructured.
func (wn *WNode) GetBinaryDataMap() map[string]string {
return wn.node.GetBinaryDataMap()
}
// SetBinaryDataMap implements ifc.Kunstructured.
func (wn *WNode) SetBinaryDataMap(m map[string]string) {
wn.node.SetBinaryDataMap(m)
}
// GetKind implements ifc.Kunstructured.
func (wn *WNode) GetKind() string {
return wn.demandMetaData("GetKind").Kind
}
// GetLabels implements ifc.Kunstructured.
func (wn *WNode) GetLabels() map[string]string {
return wn.demandMetaData("GetLabels").Labels
}
// GetName implements ifc.Kunstructured.
func (wn *WNode) GetName() string {
return wn.demandMetaData("GetName").Name
}
// GetSlice implements ifc.Kunstructured.
func (wn *WNode) GetSlice(path string) ([]interface{}, error) {
value, err := wn.GetFieldValue(path)
if err != nil {
return nil, err
}
if sliceValue, ok := value.([]interface{}); ok {
return sliceValue, nil
}
return nil, fmt.Errorf("node %s is not a slice", path)
}
// GetSlice implements ifc.Kunstructured.
func (wn *WNode) GetString(path string) (string, error) {
value, err := wn.GetFieldValue(path)
if err != nil {
return "", err
}
if v, ok := value.(string); ok {
return v, nil
}
return "", fmt.Errorf("node %s is not a string: %v", path, value)
}
// Map implements ifc.Kunstructured.
func (wn *WNode) Map() (map[string]interface{}, error) {
return wn.node.Map()
}
// MarshalJSON implements ifc.Kunstructured.
func (wn *WNode) MarshalJSON() ([]byte, error) {
return wn.node.MarshalJSON()
}
// MatchesAnnotationSelector implements ifc.Kunstructured.
func (wn *WNode) MatchesAnnotationSelector(selector string) (bool, error) {
return wn.node.MatchesAnnotationSelector(selector)
}
// MatchesLabelSelector implements ifc.Kunstructured.
func (wn *WNode) MatchesLabelSelector(selector string) (bool, error) {
return wn.node.MatchesLabelSelector(selector)
}
// SetAnnotations implements ifc.Kunstructured.
func (wn *WNode) SetAnnotations(annotations map[string]string) {
if err := wn.node.SetAnnotations(annotations); err != nil {
log.Fatal(err) // interface doesn't allow error.
}
}
// SetGvk implements ifc.Kunstructured.
func (wn *WNode) SetGvk(gvk resid.Gvk) {
wn.setMapField(yaml.NewScalarRNode(gvk.Kind), yaml.KindField)
wn.setMapField(yaml.NewScalarRNode(gvk.ApiVersion()), yaml.APIVersionField)
}
// SetLabels implements ifc.Kunstructured.
func (wn *WNode) SetLabels(labels map[string]string) {
if err := wn.node.SetLabels(labels); err != nil {
log.Fatal(err) // interface doesn't allow error.
}
}
// SetName implements ifc.Kunstructured.
func (wn *WNode) SetName(name string) {
wn.setMapField(yaml.NewScalarRNode(name), yaml.MetadataField, yaml.NameField)
}
// SetNamespace implements ifc.Kunstructured.
func (wn *WNode) SetNamespace(ns string) {
if err := wn.node.SetNamespace(ns); err != nil {
log.Fatal(err) // interface doesn't allow error.
}
}
func (wn *WNode) setMapField(value *yaml.RNode, path ...string) {
if err := wn.node.SetMapField(value, path...); err != nil {
// Log and die since interface doesn't allow error.
log.Fatalf("failed to set field %v: %v", path, err)
}
}
// UnmarshalJSON implements ifc.Kunstructured.
func (wn *WNode) UnmarshalJSON(data []byte) error {
return wn.node.UnmarshalJSON(data)
}
type NoFieldError struct {
Field string
}
func (e NoFieldError) Error() string {
return fmt.Sprintf("no field named '%s'", e.Field)
}


@ -19,5 +19,8 @@ namespace:
group: apiregistration.k8s.io
kind: APIService
create: true
- path: spec/conversion/webhook/clientConfig/service/namespace
group: apiextensions.k8s.io
kind: CustomResourceDefinition
`
)


@ -19,31 +19,7 @@ func DefaultKustomizationFileName() string {
return RecognizedKustomizationFileNames()[0] return RecognizedKustomizationFileNames()[0]
} }
// IfApiMachineryElseKyaml returns true if executing the apimachinery code
// path, else we're executing the kyaml code paths.
func IfApiMachineryElseKyaml(s1, s2 string) string {
if !FlagEnableKyamlDefaultValue {
return s1
}
return s2
}
const ( const (
// FlagEnableKyamlDefaultValue is the default value for the --enable_kyaml
// flag. This value is also used in unit tests. See provider.DepProvider.
//
// TODO(#3588): Delete this constant.
//
// All tests should pass for either true or false values
// of this constant, without having to check its value.
// In the cases where there's a different outcome, either decide
// that the difference is acceptable, or make the difference go away.
//
// Historically, tests passed for enable_kyaml == false, i.e. using
// apimachinery libs. This doesn't mean the code was better, it just
// means regression tests preserved those outcomes.
FlagEnableKyamlDefaultValue = true
// An environment variable to consult for kustomization // An environment variable to consult for kustomization
// configuration data. See: // configuration data. See:
// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html // https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html


@ -41,32 +41,6 @@ const (
NoPluginHomeSentinal = "/No/non-builtin/plugins!" NoPluginHomeSentinal = "/No/non-builtin/plugins!"
) )
func EnabledPluginConfig(b types.BuiltinPluginLoadingOptions) (*types.PluginConfig, error) {
dir, err := DefaultAbsPluginHome(filesys.MakeFsOnDisk())
if err != nil {
return nil, err
}
return MakePluginConfig(types.PluginRestrictionsNone, b, dir), nil
}
func DisabledPluginConfig() *types.PluginConfig {
return MakePluginConfig(
types.PluginRestrictionsBuiltinsOnly,
types.BploUseStaticallyLinked,
NoPluginHomeSentinal)
}
func MakePluginConfig(
pr types.PluginRestrictions,
b types.BuiltinPluginLoadingOptions,
home string) *types.PluginConfig {
return &types.PluginConfig{
PluginRestrictions: pr,
AbsPluginHome: home,
BpLoadingOptions: b,
}
}
type NotedFunc struct { type NotedFunc struct {
Note string Note string
F func() string F func() string
@ -77,7 +51,7 @@ type NotedFunc struct {
// the home of kustomize plugins.
func DefaultAbsPluginHome(fSys filesys.FileSystem) (string, error) {
return FirstDirThatExistsElseError(
"plugin home directory", fSys, []NotedFunc{
"plugin root", fSys, []NotedFunc{
{
Note: "homed in $" + KustomizePluginHomeEnv,
F: func() string {
@ -87,9 +61,11 @@ func DefaultAbsPluginHome(fSys filesys.FileSystem) (string, error) {
{
Note: "homed in $" + XdgConfigHomeEnv,
F: func() string {
return filepath.Join(
os.Getenv(XdgConfigHomeEnv),
ProgramName, RelPluginHome)
if root := os.Getenv(XdgConfigHomeEnv); root != "" {
return filepath.Join(root, ProgramName, RelPluginHome)
}
// do not look in "kustomize/plugin" if XdgConfigHomeEnv is unset
return ""
},
},
{
@ -118,11 +94,14 @@ func FirstDirThatExistsElseError(
pathFuncs []NotedFunc) (string, error) { pathFuncs []NotedFunc) (string, error) {
var nope []types.Pair var nope []types.Pair
for _, dt := range pathFuncs { for _, dt := range pathFuncs {
dir := dt.F() if dir := dt.F(); dir != "" {
if fSys.Exists(dir) { if fSys.Exists(dir) {
return dir, nil return dir, nil
}
nope = append(nope, types.Pair{Key: dt.Note, Value: dir})
} else {
nope = append(nope, types.Pair{Key: dt.Note, Value: "<no value>"})
} }
nope = append(nope, types.Pair{Key: dt.Note, Value: dir})
} }
return "", types.NewErrUnableToFind(what, nope) return "", types.NewErrUnableToFind(what, nope)
} }
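A small illustration of the lookup order after this change, not part of this changeset: with the plugin-home and XDG environment variables unset, the "kustomize/plugin" candidate is now skipped rather than probed as a relative path, and the returned error lists every candidate that was actually checked.

    package main

    import (
        "fmt"
        "os"

        "sigs.k8s.io/kustomize/api/filesys"
        "sigs.k8s.io/kustomize/api/konfig"
    )

    func main() {
        os.Unsetenv(konfig.KustomizePluginHomeEnv)
        os.Unsetenv(konfig.XdgConfigHomeEnv)
        // An empty in-memory filesystem: none of the candidate directories exist.
        fSys := filesys.MakeFsInMemory()
        dir, err := konfig.DefaultAbsPluginHome(fSys)
        fmt.Println(dir) // ""
        fmt.Println(err) // lists each candidate that was tried
    }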


@ -36,7 +36,7 @@ type Kustomizer struct {
func MakeKustomizer(o *Options) *Kustomizer {
return &Kustomizer{
options: o,
depProvider: provider.NewDepProvider(o.UseKyaml),
depProvider: provider.NewDepProvider(),
}
}
@ -52,9 +52,7 @@ func MakeKustomizer(o *Options) *Kustomizer {
// and Run can be called on each of them).
func (b *Kustomizer) Run(
fSys filesys.FileSystem, path string) (resmap.ResMap, error) {
resmapFactory := resmap.NewFactory(
b.depProvider.GetResourceFactory(),
b.depProvider.GetConflictDetectorFactory())
resmapFactory := resmap.NewFactory(b.depProvider.GetResourceFactory())
lr := fLdr.RestrictionNone
if b.options.LoadRestrictions == types.LoadRestrictionsRootOnly {
lr = fLdr.RestrictionRootOnly
@ -68,7 +66,8 @@ func (b *Kustomizer) Run(
ldr,
b.depProvider.GetFieldValidator(),
resmapFactory,
pLdr.NewLoader(b.options.PluginConfig, resmapFactory),
// The plugin configs are always located on disk, regardless of the fSys passed in
pLdr.NewLoader(b.options.PluginConfig, resmapFactory, filesys.MakeFsOnDisk()),
)
err = kt.Load()
if err != nil {
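A minimal end-to-end sketch of the krusty API as it stands after this change, not part of this changeset; the kustomization content below is illustrative:

    package main

    import (
        "fmt"

        "sigs.k8s.io/kustomize/api/filesys"
        "sigs.k8s.io/kustomize/api/krusty"
    )

    func main() {
        fSys := filesys.MakeFsInMemory()
        // Errors ignored for brevity in this sketch.
        _ = fSys.WriteFile("/app/kustomization.yaml", []byte("resources:\n- cm.yaml\n"))
        _ = fSys.WriteFile("/app/cm.yaml",
            []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\n"))

        k := krusty.MakeKustomizer(krusty.MakeDefaultOptions())
        m, err := k.Run(fSys, "/app")
        if err != nil {
            panic(err)
        }
        yml, err := m.AsYaml()
        if err != nil {
            panic(err)
        }
        fmt.Println(string(yml))
    }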


@ -5,7 +5,6 @@ package krusty
import ( import (
"sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers" "sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers"
"sigs.k8s.io/kustomize/api/konfig"
"sigs.k8s.io/kustomize/api/types" "sigs.k8s.io/kustomize/api/types"
) )
@ -33,37 +32,19 @@ type Options struct {
// Options related to kustomize plugins. // Options related to kustomize plugins.
PluginConfig *types.PluginConfig PluginConfig *types.PluginConfig
// TODO(#3588): Delete this field (it's always true).
// When true, use kyaml/ packages to manipulate KRM yaml.
// When false, use k8sdeps/ instead (uses k8s.io/api* packages).
UseKyaml bool
// When true, allow name and kind changing via a patch
// When false, patch name/kind don't overwrite target name/kind
AllowResourceIdChanges bool
} }
// MakeDefaultOptions returns a default instance of Options.
func MakeDefaultOptions() *Options {
return &Options{
DoLegacyResourceSort: false,
AddManagedbyLabel: false,
LoadRestrictions: types.LoadRestrictionsRootOnly,
DoPrune: false,
PluginConfig: konfig.DisabledPluginConfig(),
PluginConfig: types.DisabledPluginConfig(),
UseKyaml: konfig.FlagEnableKyamlDefaultValue,
AllowResourceIdChanges: false,
}
}
func (o Options) IfApiMachineryElseKyaml(s1, s2 string) string {
if !o.UseKyaml {
return s1
}
return s2
}
// GetBuiltinPluginNames returns a list of builtin plugin names // GetBuiltinPluginNames returns a list of builtin plugin names
func GetBuiltinPluginNames() []string { func GetBuiltinPluginNames() []string {
var ret []string var ret []string


@ -319,7 +319,6 @@ func (fl *fileLoader) Load(path string) ([]byte, error) {
} }
return body, nil return body, nil
} }
if !filepath.IsAbs(path) { if !filepath.IsAbs(path) {
path = fl.root.Join(path) path = fl.root.Join(path)
} }


@ -4,198 +4,39 @@
package provider package provider
import ( import (
"log" "sigs.k8s.io/kustomize/api/hasher"
"sigs.k8s.io/kustomize/api/ifc" "sigs.k8s.io/kustomize/api/ifc"
"sigs.k8s.io/kustomize/api/internal/conflict"
"sigs.k8s.io/kustomize/api/internal/validate" "sigs.k8s.io/kustomize/api/internal/validate"
"sigs.k8s.io/kustomize/api/internal/wrappy"
"sigs.k8s.io/kustomize/api/konfig"
"sigs.k8s.io/kustomize/api/resource" "sigs.k8s.io/kustomize/api/resource"
) )
// DepProvider is a dependency provider. // DepProvider is a dependency provider, injecting different
// // implementations depending on the context.
// The instances it returns are either
// - old implementations backed by k8sdeps code,
// - new implementations backed by kyaml code.
//
// History:
//
// kubectl depends on k8s.io code, and at the time of writing, so
// does kustomize. Code that imports k8s.io/api* cannot be imported
// back into k8s.io/*, yet kustomize appears inside k8s.io/kubectl.
//
// To allow kustomize to appear inside kubectl, yet still be developed
// outside kubectl, the kustomize code was divided into the following
// packages
//
// api/
// k8sdeps/ (and internal/ks8deps/)
// ifc/
// krusty/
// everythingElse/
//
// with the following rules:
//
// - Only k8sdeps/ may import k8s.io/api*.
//
// - Only krusty/ (and its internals) may import k8sdeps/.
// I.e., ifc/ and everythingElse/ must not
// import k8sdeps/ or k8s.io/api*.
//
// - Code in krusty/ may use code in k8sdeps/ to create
// objects then inject said objects into
// everythingElse/ behind dependency neutral interfaces.
//
// The idea was to periodically copy, not import, the large k8sdeps/
// tree (plus a snippet from krusty/kustomizer.go) into the kubectl
// codebase via a large PR, and have kubectl depend on the rest via
// normal importing.
//
// Over 2019, however, kubectl underwent large changes including
// a switch to Go modules, and a concerted attempt to extract kubectl
// from the k8s repo. This made large kustomize integration PRs too
// intrusive to review.
//
// In 2020, kubectl is based on Go modules, and almost entirely
// extracted from the k8s.io repositories, and further the kyaml
// library has a appeared as a viable replacement to k8s.io/api*
// KRM manipulation code.
//
// The new plan is to eliminate k8sdeps/ entirely, along with its
// k8s.io/api* dependence, allowing kustomize code to be imported
// into kubectl via normal Go module imports. Then the kustomize API
// code can then move into the github.com/kubernetes-sigs/cli-utils
// repo. The kustomize CLI in github.com/kubernetes-sigs/kustomize
// and the kubectl CLI can then both depend on the kustomize API.
//
// So, all code that depends on k8sdeps must go behind interfaces,
// and kustomize must be factored to choose the implementation.
//
// That problem has been reduced to three interfaces, each having
// two implementations. (1) is k8sdeps-based, (2) is kyaml-based.
//
// - ifc.Kunstructured
//
// 1) api/k8sdeps/kunstruct.UnstructAdapter
//
// This adapts structs in
// k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
// to ifc.Kunstructured.
//
// 2) api/wrappy.WNode
//
// This adapts sigs.k8s.io/kustomize/kyaml/yaml.RNode
// to ifc.Unstructured.
//
// At time of writing, implementation started.
// Further reducing the size of ifc.Kunstructed
// would really reduce the work
// (e.g. drop Vars, drop ReplacementTranformer).
//
// - resource.ConflictDetector
//
// 1) api/internal/k8sdeps/conflict.conflictDetectorJson
// api/internal/k8sdeps/conflict.conflictDetectorSm
//
// Uses k8s.io/apimachinery/pkg/util/strategicpatch,
// apimachinery/pkg/util/mergepatch, etc. to merge
// resource.Resource instances.
//
// 2) api/internal/conflict.smPatchMergeOnlyDetector
//
// At time of writing, this doesn't report conflicts,
// but it does know how to merge patches. Conflict
// reporting isn't vital to kustomize function. It's
// rare that a person would configure one transformer
// with many patches, much less so many that it became
// hard to spot conflicts. In the case of an undetected
// conflict, the last patch applied wins, likely what
// the user wants anyway. Regardless, the effect of this
// is plainly visible and usable in the output, even if
// a conflict happened but wasn't reported as an error.
//
// - ifc.Validator
//
// 1) api/k8sdeps/validator.KustValidator
//
// Uses k8s.io/apimachinery/pkg/api/validation and
// friends to validate strings.
//
// 2) api/internal/validate.FieldValidator
//
// See TODO inside the validator for status.
// At time of writing, this is a do-nothing
// validator as it's not critical to kustomize function.
//
// Proposed plan:
// [x] Ship kustomize with the ability to switch from 1 to 2 via
// an --enable_kyaml flag.
// [x] Make --enable_kyaml true by default.
// [x] When 2 is not noticeably more buggy than 1, delete 1.
// I.e. delete k8sdeps/, transitively deleting all k8s.io/api* deps.
// This DepProvider should be left in place to retain these
// comments, but it will have only one choice.
// [x] The way is now clear to reintegrate into kubectl.
// This should be done ASAP; the last step is cleanup.
// [ ] Cleanup. With only one impl of Kunstructure remaining,
// that interface and WNode can be deleted, along with this
// DepProvider. The other two interfaces could be dropped too.
//
// When the above is done, kustomize will use yaml.RNode and/or
// KRM Config Functions directly and exclusively.
// If you're reading this, plan not done.
//
type DepProvider struct {
kFactory ifc.KunstructuredFactory
resourceFactory *resource.Factory
conflictDectectorFactory resource.ConflictDetectorFactory
fieldValidator ifc.Validator
resourceFactory *resource.Factory
// implemented by api/internal/validate.FieldValidator
// See TODO inside the validator for status.
// At time of writing, this is a do-nothing
// validator as it's not critical to kustomize function.
fieldValidator ifc.Validator
}
// The dependencies this method needs have been deleted -
// see comments above. This method will be deleted
// along with DepProvider in the final step.
func makeK8sdepBasedInstances() *DepProvider {
log.Fatal("This binary cannot use k8s.io code; it must use kyaml.")
return nil
}
func makeKyamlBasedInstances() *DepProvider {
kf := &wrappy.WNodeFactory{}
rf := resource.NewFactory(kf)
func NewDepProvider() *DepProvider {
rf := resource.NewFactory(&hasher.Hasher{})
return &DepProvider{
kFactory: kf,
resourceFactory: rf,
conflictDectectorFactory: conflict.NewFactory(),
fieldValidator: validate.NewFieldValidator(),
resourceFactory: rf,
fieldValidator: validate.NewFieldValidator(),
}
}
func NewDepProvider(useKyaml bool) *DepProvider {
if useKyaml {
return makeKyamlBasedInstances()
}
return makeK8sdepBasedInstances()
}
func NewDefaultDepProvider() *DepProvider {
return NewDepProvider(konfig.FlagEnableKyamlDefaultValue)
return NewDepProvider()
}
func (dp *DepProvider) GetKunstructuredFactory() ifc.KunstructuredFactory {
return dp.kFactory
}
func (dp *DepProvider) GetResourceFactory() *resource.Factory {
return dp.resourceFactory
}
func (dp *DepProvider) GetConflictDetectorFactory() resource.ConflictDetectorFactory {
return dp.conflictDectectorFactory
}
func (dp *DepProvider) GetFieldValidator() ifc.Validator {
return dp.fieldValidator
}
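With the k8sdeps path gone, constructing the provider is now unconditional. A short usage sketch, not part of this changeset, assuming the public api/provider package that exposes NewDefaultDepProvider:

    package main

    import (
        "fmt"

        // Assumed import path for the public DepProvider package.
        "sigs.k8s.io/kustomize/api/provider"
    )

    func main() {
        dp := provider.NewDefaultDepProvider()
        rf := dp.GetResourceFactory()
        r, err := rf.FromBytes(
            []byte("apiVersion: v1\nkind: Service\nmetadata:\n  name: demo\n"))
        if err != nil {
            panic(err)
        }
        // The resource is now backed by a kyaml RNode rather than a Kunstructured.
        fmt.Println(r.GetKind(), r.GetName())
    }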


@ -36,6 +36,14 @@ func ParseGroupVersion(apiVersion string) (group, version string) {
// GvkFromString makes a Gvk from the output of Gvk.String(). // GvkFromString makes a Gvk from the output of Gvk.String().
func GvkFromString(s string) Gvk { func GvkFromString(s string) Gvk {
values := strings.Split(s, fieldSep) values := strings.Split(s, fieldSep)
if len(values) != 3 {
// ...then the string didn't come from Gvk.String().
return Gvk{
Group: noGroup,
Version: noVersion,
Kind: noKind,
}
}
g := values[0] g := values[0]
if g == noGroup { if g == noGroup {
g = "" g = ""
@ -213,7 +221,10 @@ func (x Gvk) toKyamlTypeMeta() yaml.TypeMeta {
} }
} }
// IsNamespaceableKind returns true if x is a namespaceable Gvk
// IsNamespaceableKind returns true if x is a namespaceable Gvk,
// e.g. instances of Pod and Deployment are namespaceable,
// but instances of Node and Namespace are not namespaceable.
// Alternative name for this method: IsNotClusterScoped.
// Implements https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/#not-all-objects-are-in-a-namespace
func (x Gvk) IsNamespaceableKind() bool {
isNamespaceScoped, found := openapi.IsNamespaceScoped(x.toKyamlTypeMeta())
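A tiny round-trip sketch of the new GvkFromString guard, not part of this changeset: well-formed Gvk.String() output parses back, and anything else now maps to the noGroup/noVersion/noKind value instead of indexing past the split result.

    package main

    import (
        "fmt"

        "sigs.k8s.io/kustomize/api/resid"
    )

    func main() {
        g := resid.Gvk{Group: "apps", Version: "v1", Kind: "Deployment"}
        fmt.Println(resid.GvkFromString(g.String())) // round-trips to the same Gvk
        fmt.Println(resid.GvkFromString("not-a-gvk")) // falls back to the "no gvk" value
    }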


@ -16,14 +16,11 @@ import (
type Factory struct {
// Makes resources.
resF *resource.Factory
// Makes ConflictDetectors.
cdf resource.ConflictDetectorFactory
}
// NewFactory returns a new resmap.Factory.
func NewFactory(
rf *resource.Factory, cdf resource.ConflictDetectorFactory) *Factory {
return &Factory{resF: rf, cdf: cdf}
func NewFactory(rf *resource.Factory) *Factory {
return &Factory{resF: rf}
}
// RF returns a resource.Factory. // RF returns a resource.Factory.
@ -126,13 +123,6 @@ func (rmF *Factory) FromSecretArgs(
return rmF.FromResource(res), nil return rmF.FromResource(res), nil
} }
// ConflatePatches creates a new ResMap containing a merger of the
// incoming patches.
// Error if conflict found.
func (rmF *Factory) ConflatePatches(patches []*resource.Resource) (ResMap, error) {
return (&merginator{cdf: rmF.cdf}).ConflatePatches(patches)
}
func newResMapFromResourceSlice( func newResMapFromResourceSlice(
resources []*resource.Resource) (ResMap, error) { resources []*resource.Resource) (ResMap, error) {
result := New() result := New()
@ -146,18 +136,10 @@ func newResMapFromResourceSlice(
} }
// NewResMapFromRNodeSlice returns a ResMap from a slice of RNodes
func (rmF *Factory) NewResMapFromRNodeSlice(rnodes []*yaml.RNode) (ResMap, error) {
var resources []*resource.Resource
for _, rnode := range rnodes {
s, err := rnode.String()
if err != nil {
return nil, err
}
r, err := rmF.resF.SliceFromBytes([]byte(s))
if err != nil {
return nil, err
}
resources = append(resources, r...)
}
return newResMapFromResourceSlice(resources)
}
func (rmF *Factory) NewResMapFromRNodeSlice(s []*yaml.RNode) (ResMap, error) {
rs, err := rmF.resF.ResourcesFromRNodes(s)
if err != nil {
return nil, err
}
return newResMapFromResourceSlice(rs)
}


@ -1,123 +0,0 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package resmap
import (
"fmt"
"sigs.k8s.io/kustomize/api/resource"
)
// merginator coordinates merging the resources in incoming to the result.
type merginator struct {
incoming []*resource.Resource
cdf resource.ConflictDetectorFactory
result ResMap
}
func (m *merginator) ConflatePatches(in []*resource.Resource) (ResMap, error) {
m.result = New()
m.incoming = in
for index := range m.incoming {
alreadyInResult, err := m.appendIfNoMatch(index)
if err != nil {
return nil, err
}
if alreadyInResult != nil {
// The resource at index has the same resId as a previously
// considered resource.
//
// If they conflict with each other (e.g. they both want to change
// the image name in a Deployment, but to different values),
// return an error.
//
// If they don't conflict, then merge them into a single resource,
// since they both target the same item, and we want cumulative
// behavior. E.g. say both patches modify a map. Without a merge,
// the last patch wins, replacing the entire map.
err = m.mergeWithExisting(index, alreadyInResult)
if err != nil {
return nil, err
}
}
}
return m.result, nil
}
func (m *merginator) appendIfNoMatch(index int) (*resource.Resource, error) {
candidate := m.incoming[index]
matchedResources := m.result.GetMatchingResourcesByAnyId(
candidate.OrgId().Equals)
if len(matchedResources) == 0 {
m.result.Append(candidate)
return nil, nil
}
if len(matchedResources) > 1 {
return nil, fmt.Errorf("multiple resources targeted by patch")
}
return matchedResources[0], nil
}
func (m *merginator) mergeWithExisting(
index int, alreadyInResult *resource.Resource) error {
candidate := m.incoming[index]
cd, err := m.cdf.New(candidate.OrgId().Gvk)
if err != nil {
return err
}
hasConflict, err := cd.HasConflict(candidate, alreadyInResult)
if err != nil {
return err
}
if hasConflict {
return m.makeError(cd, index)
}
merged, err := cd.MergePatches(alreadyInResult, candidate)
if err != nil {
return err
}
_, err = m.result.Replace(merged)
return err
}
// Make an error message describing the conflict.
func (m *merginator) makeError(cd resource.ConflictDetector, index int) error {
conflict, err := m.findConflict(cd, index)
if err != nil {
return err
}
if conflict == nil {
return fmt.Errorf("expected conflict for %s", m.incoming[index].OrgId())
}
conflictMap, _ := conflict.Map()
incomingIndexMap, _ := m.incoming[index].Map()
return fmt.Errorf(
"conflict between %#v at index %d and %#v",
incomingIndexMap,
index,
conflictMap,
)
}
// findConflict looks for a conflict in a resource slice.
// It returns the first conflict between the resource at index
// and some other resource. Two resources can only conflict if
// they have the same original ResId.
func (m *merginator) findConflict(
cd resource.ConflictDetector, index int) (*resource.Resource, error) {
targetId := m.incoming[index].OrgId()
for i, p := range m.incoming {
if i == index || !targetId.Equals(p.OrgId()) {
continue
}
conflict, err := cd.HasConflict(p, m.incoming[index])
if err != nil {
return nil, err
}
if conflict {
return p, nil
}
}
return nil, nil
}


@ -33,8 +33,10 @@ type Configurable interface {
} }
// NewPluginHelpers makes an instance of PluginHelpers.
func NewPluginHelpers(ldr ifc.Loader, v ifc.Validator, rf *Factory) *PluginHelpers {
return &PluginHelpers{ldr: ldr, v: v, rf: rf}
}
func NewPluginHelpers(
ldr ifc.Loader, v ifc.Validator, rf *Factory,
pc *types.PluginConfig) *PluginHelpers {
return &PluginHelpers{ldr: ldr, v: v, rf: rf, pc: pc}
}
// PluginHelpers holds things that any or all plugins might need. // PluginHelpers holds things that any or all plugins might need.
@ -44,6 +46,11 @@ type PluginHelpers struct {
ldr ifc.Loader ldr ifc.Loader
v ifc.Validator v ifc.Validator
rf *Factory rf *Factory
pc *types.PluginConfig
}
func (c *PluginHelpers) GeneralConfig() *types.PluginConfig {
return c.pc
} }
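A sketch of what the extra helper data enables for a builtin-style plugin, not part of this changeset; the plugin type here is illustrative, only the PluginHelpers/GeneralConfig calls come from the code above:

    package main

    import (
        "fmt"

        "sigs.k8s.io/kustomize/api/resmap"
    )

    // demoGenerator is an illustrative plugin skeleton, not a kustomize type.
    type demoGenerator struct {
        h *resmap.PluginHelpers
    }

    func (p *demoGenerator) Config(h *resmap.PluginHelpers, _ []byte) error {
        p.h = h
        // New in this change: the general plugin configuration rides along with
        // the helpers, e.g. the absolute plugin home directory.
        fmt.Println(p.h.GeneralConfig().AbsPluginHome)
        return nil
    }

    func (p *demoGenerator) Generate() (resmap.ResMap, error) {
        return resmap.New(), nil
    }

    func main() {}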
func (c *PluginHelpers) Loader() ifc.Loader { func (c *PluginHelpers) Loader() ifc.Loader {
@ -80,6 +87,9 @@ type TransformerPlugin interface {
// resource to transform, try the OrgId first, and if this // resource to transform, try the OrgId first, and if this
// fails or finds too many, it might make sense to then try // fails or finds too many, it might make sense to then try
// the CurrId. Depends on the situation. // the CurrId. Depends on the situation.
//
// TODO: get rid of this interface (use bare resWrangler).
// There aren't multiple implementations any more.
type ResMap interface { type ResMap interface {
// Size reports the number of resources. // Size reports the number of resources.
Size() int Size() int
@ -189,6 +199,9 @@ type ResMap interface {
// Clear removes all resources and Ids. // Clear removes all resources and Ids.
Clear() Clear()
// DropEmpties drops empty resources from the ResMap.
DropEmpties()
// SubsetThatCouldBeReferencedByResource returns a ResMap subset // SubsetThatCouldBeReferencedByResource returns a ResMap subset
// of self with resources that could be referenced by the // of self with resources that could be referenced by the
// resource argument. // resource argument.
@ -231,9 +244,8 @@ type ResMap interface {
// are selected by a Selector
Select(types.Selector) ([]*resource.Resource, error)
// ToRNodeSlice converts the resources in the resmp
// to a list of RNodes
ToRNodeSlice() ([]*yaml.RNode, error)
// ToRNodeSlice returns a copy of the resources as RNodes.
ToRNodeSlice() []*yaml.RNode
// ApplySmPatch applies a strategic-merge patch to the // ApplySmPatch applies a strategic-merge patch to the
// selected set of resources. // selected set of resources.

View File

@ -6,14 +6,12 @@ package resmap
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"strings"
"github.com/pkg/errors" "github.com/pkg/errors"
"sigs.k8s.io/kustomize/api/resid" "sigs.k8s.io/kustomize/api/resid"
"sigs.k8s.io/kustomize/api/resource" "sigs.k8s.io/kustomize/api/resource"
"sigs.k8s.io/kustomize/api/types" "sigs.k8s.io/kustomize/api/types"
kyaml_yaml "sigs.k8s.io/kustomize/kyaml/yaml" kyaml "sigs.k8s.io/kustomize/kyaml/yaml"
"sigs.k8s.io/yaml"
) )
// resWrangler implements ResMap. // resWrangler implements ResMap.
@ -38,6 +36,18 @@ func (m *resWrangler) Clear() {
m.rList = nil m.rList = nil
} }
// DropEmpties quickly drops empty resources.
// It doesn't use Append, which checks for Id collisions.
func (m *resWrangler) DropEmpties() {
var rList []*resource.Resource
for _, r := range m.rList {
if !r.IsEmpty() {
rList = append(rList, r)
}
}
m.rList = rList
}
// Size implements ResMap. // Size implements ResMap.
func (m *resWrangler) Size() int { func (m *resWrangler) Size() int {
return len(m.rList) return len(m.rList)
@ -66,22 +76,27 @@ func (m *resWrangler) Append(res *resource.Resource) error {
return fmt.Errorf( return fmt.Errorf(
"may not add resource with an already registered id: %s", id) "may not add resource with an already registered id: %s", id)
} }
m.rList = append(m.rList, res) m.append(res)
return nil return nil
} }
// append appends without performing an Id check
func (m *resWrangler) append(res *resource.Resource) {
m.rList = append(m.rList, res)
}
// Remove implements ResMap. // Remove implements ResMap.
func (m *resWrangler) Remove(adios resid.ResId) error { func (m *resWrangler) Remove(adios resid.ResId) error {
tmp := newOne() var rList []*resource.Resource
for _, r := range m.rList { for _, r := range m.rList {
if r.CurId() != adios { if r.CurId() != adios {
tmp.Append(r) rList = append(rList, r)
} }
} }
if tmp.Size() != m.Size()-1 { if len(rList) != m.Size()-1 {
return fmt.Errorf("id %s not found in removal", adios) return fmt.Errorf("id %s not found in removal", adios)
} }
m.rList = tmp.rList m.rList = rList
return nil return nil
} }
@ -118,16 +133,7 @@ func (m *resWrangler) Debug(title string) {
} else { } else {
fmt.Println("---") fmt.Println("---")
} }
fmt.Printf("# %d %s\n", i, r.OrgId()) fmt.Printf("# %d %s\n%s\n", i, r.OrgId(), r.String())
m, err := r.Map()
if err != nil {
panic(err)
}
blob, err := yaml.Marshal(m)
if err != nil {
panic(err)
}
fmt.Println(string(blob))
} }
} }
@ -273,7 +279,7 @@ func (m *resWrangler) AsYaml() ([]byte, error) {
firstObj := true firstObj := true
var b []byte var b []byte
buf := bytes.NewBuffer(b) buf := bytes.NewBuffer(b)
for _, res := range m.Resources() { for _, res := range m.rList {
out, err := res.AsYAML() out, err := res.AsYAML()
if err != nil { if err != nil {
m, _ := res.Map() m, _ := res.Map()
@ -297,7 +303,7 @@ func (m *resWrangler) AsYaml() ([]byte, error) {
func (m *resWrangler) ErrorIfNotEqualSets(other ResMap) error { func (m *resWrangler) ErrorIfNotEqualSets(other ResMap) error {
m2, ok := other.(*resWrangler) m2, ok := other.(*resWrangler)
if !ok { if !ok {
panic("bad cast") return fmt.Errorf("bad cast to resWrangler 1")
} }
if m.Size() != m2.Size() { if m.Size() != m2.Size() {
return fmt.Errorf( return fmt.Errorf(
@ -317,9 +323,9 @@ func (m *resWrangler) ErrorIfNotEqualSets(other ResMap) error {
"id in self matches %d in other; id: %s", len(others), id) "id in self matches %d in other; id: %s", len(others), id)
} }
r2 := others[0] r2 := others[0]
if !r1.KunstructEqual(r2) { if !r1.NodeEqual(r2) {
return fmt.Errorf( return fmt.Errorf(
"kunstruct not equal: \n -- %s,\n -- %s\n\n--\n%#v\n------\n%#v\n", "nodes unequal: \n -- %s,\n -- %s\n\n--\n%#v\n------\n%#v\n",
r1, r2, r1, r2) r1, r2, r1, r2)
} }
seen[m2.indexOfResource(r2)] = true seen[m2.indexOfResource(r2)] = true
@ -334,7 +340,7 @@ func (m *resWrangler) ErrorIfNotEqualSets(other ResMap) error {
func (m *resWrangler) ErrorIfNotEqualLists(other ResMap) error { func (m *resWrangler) ErrorIfNotEqualLists(other ResMap) error {
m2, ok := other.(*resWrangler) m2, ok := other.(*resWrangler)
if !ok { if !ok {
panic("bad cast") return fmt.Errorf("bad cast to resWrangler 2")
} }
if m.Size() != m2.Size() { if m.Size() != m2.Size() {
return fmt.Errorf( return fmt.Errorf(
@ -388,7 +394,7 @@ func (m *resWrangler) SubsetThatCouldBeReferencedByResource(
} }
result := newOne() result := newOne()
roleBindingNamespaces := getNamespacesForRoleBinding(referrer) roleBindingNamespaces := getNamespacesForRoleBinding(referrer)
for _, possibleTarget := range m.Resources() { for _, possibleTarget := range m.rList {
id := possibleTarget.CurId() id := possibleTarget.CurId()
if !id.IsNamespaceableKind() { if !id.IsNamespaceableKind() {
// A cluster-scoped resource can be referred to by anything. // A cluster-scoped resource can be referred to by anything.
@ -435,16 +441,21 @@ func getNamespacesForRoleBinding(r *resource.Resource) map[string]bool {
return result return result
} }
func (m *resWrangler) append(res *resource.Resource) {
m.rList = append(m.rList, res)
}
// AppendAll implements ResMap. // AppendAll implements ResMap.
func (m *resWrangler) AppendAll(other ResMap) error { func (m *resWrangler) AppendAll(other ResMap) error {
if other == nil { if other == nil {
return nil return nil
} }
for _, res := range other.Resources() { m2, ok := other.(*resWrangler)
if !ok {
return fmt.Errorf("bad cast to resWrangler 3")
}
return m.appendAll(m2.rList)
}
// appendAll appends all the resources, error on Id collision.
func (m *resWrangler) appendAll(list []*resource.Resource) error {
for _, res := range list {
if err := m.Append(res); err != nil { if err := m.Append(res); err != nil {
return err return err
} }
@ -457,7 +468,11 @@ func (m *resWrangler) AbsorbAll(other ResMap) error {
if other == nil { if other == nil {
return nil return nil
} }
for _, r := range other.Resources() { m2, ok := other.(*resWrangler)
if !ok {
return fmt.Errorf("bad cast to resWrangler 4")
}
for _, r := range m2.rList {
err := m.appendReplaceOrMerge(r) err := m.appendReplaceOrMerge(r)
if err != nil { if err != nil {
return err return err
@ -522,7 +537,7 @@ func (m *resWrangler) Select(s types.Selector) ([]*resource.Resource, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
for _, r := range m.Resources() { for _, r := range m.rList {
curId := r.CurId() curId := r.CurId()
orgId := r.OrgId() orgId := r.OrgId()
@ -567,77 +582,39 @@ func (m *resWrangler) Select(s types.Selector) ([]*resource.Resource, error) {
return result, nil return result, nil
} }
// ToRNodeSlice converts the resources in the resmp
// to a list of RNodes
func (m *resWrangler) ToRNodeSlice() ([]*kyaml_yaml.RNode, error) {
var rnodes []*kyaml_yaml.RNode
for _, r := range m.Resources() {
s, err := r.AsYAML()
if err != nil {
return nil, err
}
rnode, err := kyaml_yaml.Parse(string(s))
if err != nil {
return nil, err
}
rnodes = append(rnodes, rnode)
}
return rnodes, nil
}
// ToRNodeSlice returns a copy of the resources as RNodes.
func (m *resWrangler) ToRNodeSlice() []*kyaml.RNode {
result := make([]*kyaml.RNode, len(m.rList))
for i := range m.rList {
result[i] = m.rList[i].AsRNode()
}
return result
}
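A small round-trip sketch of the new signature, not part of this changeset: build a ResMap from RNodes, then pull RNode copies back out without an error return.

    package main

    import (
        "fmt"

        "sigs.k8s.io/kustomize/api/hasher"
        "sigs.k8s.io/kustomize/api/resmap"
        "sigs.k8s.io/kustomize/api/resource"
    )

    func main() {
        rf := resource.NewFactory(&hasher.Hasher{})
        rmF := resmap.NewFactory(rf)
        nodes, err := rf.RNodesFromBytes(
            []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\n"))
        if err != nil {
            panic(err)
        }
        m, err := rmF.NewResMapFromRNodeSlice(nodes)
        if err != nil {
            panic(err)
        }
        // ToRNodeSlice now hands back copies directly, with no error to check.
        fmt.Println(len(m.ToRNodeSlice()))
    }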
// ApplySmPatch applies the patch, and errors on Id collisions.
func (m *resWrangler) ApplySmPatch( func (m *resWrangler) ApplySmPatch(
selectedSet *resource.IdSet, patch *resource.Resource) error { selectedSet *resource.IdSet, patch *resource.Resource) error {
newRm := New() var list []*resource.Resource
for _, res := range m.Resources() { for _, res := range m.rList {
if !selectedSet.Contains(res.CurId()) { if selectedSet.Contains(res.CurId()) {
newRm.Append(res) patchCopy := patch.DeepCopy()
continue patchCopy.CopyMergeMetaDataFieldsFrom(patch)
} patchCopy.SetGvk(res.GetGvk())
patchCopy := patch.DeepCopy() patchCopy.SetKind(patch.GetKind())
patchCopy.CopyMergeMetaDataFieldsFrom(patch) if err := res.ApplySmPatch(patchCopy); err != nil {
patchCopy.SetGvk(res.GetGvk())
err := res.ApplySmPatch(patchCopy)
if err != nil {
// Check for an error string from UnmarshalJSON that's indicative
// of an object that's missing basic KRM fields, and thus may have been
// entirely deleted (an acceptable outcome). This error handling should
// be deleted along with use of ResMap and apimachinery functions like
// UnmarshalJSON.
if !strings.Contains(err.Error(), "Object 'Kind' is missing") {
// Some unknown error, let it through.
return err return err
} }
empty, err := res.IsEmpty()
if err != nil {
return err
}
if !empty {
m, _ := res.Map()
return errors.Wrapf(
err, "with unexpectedly non-empty object map of size %d",
len(m))
}
// Fall through to handle deleted object.
} }
empty, err := res.IsEmpty() if !res.IsEmpty() {
if err != nil { list = append(list, res)
return err
}
if !empty {
// IsEmpty means all fields have been removed from the object.
// This can happen if a patch required deletion of the
// entire resource (not just a part of it). This means
// the overall resmap must shrink by one.
newRm.Append(res)
} }
} }
m.Clear() m.Clear()
m.AppendAll(newRm) return m.appendAll(list)
return nil
} }
func (m *resWrangler) RemoveBuildAnnotations() { func (m *resWrangler) RemoveBuildAnnotations() {
for _, r := range m.Resources() { for _, r := range m.rList {
r.RemoveBuildAnnotations() r.RemoveBuildAnnotations()
} }
} }


@ -1,20 +0,0 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package resource
import "sigs.k8s.io/kustomize/api/resid"
// ConflictDetector detects conflicts between resources.
type ConflictDetector interface {
// HasConflict returns true if the given resources have a conflict.
HasConflict(patch1, patch2 *Resource) (bool, error)
// Merge two resources into one.
MergePatches(patch1, patch2 *Resource) (*Resource, error)
}
// ConflictDetectorFactory makes instances of ConflictDetector that know
// how to handle the given Group, Version, Kind tuple.
type ConflictDetectorFactory interface {
New(gvk resid.Gvk) (ConflictDetector, error)
}


@ -10,28 +10,33 @@ import (
"strings" "strings"
"sigs.k8s.io/kustomize/api/ifc" "sigs.k8s.io/kustomize/api/ifc"
"sigs.k8s.io/kustomize/api/internal/generators"
"sigs.k8s.io/kustomize/api/internal/kusterr" "sigs.k8s.io/kustomize/api/internal/kusterr"
"sigs.k8s.io/kustomize/api/konfig"
"sigs.k8s.io/kustomize/api/resid" "sigs.k8s.io/kustomize/api/resid"
"sigs.k8s.io/kustomize/api/types" "sigs.k8s.io/kustomize/api/types"
"sigs.k8s.io/kustomize/kyaml/kio"
"sigs.k8s.io/kustomize/kyaml/yaml"
) )
// Factory makes instances of Resource. // Factory makes instances of Resource.
type Factory struct { type Factory struct {
kf ifc.KunstructuredFactory hasher ifc.KustHasher
} }
// NewFactory makes an instance of Factory. // NewFactory makes an instance of Factory.
func NewFactory(kf ifc.KunstructuredFactory) *Factory { func NewFactory(h ifc.KustHasher) *Factory {
return &Factory{kf: kf} return &Factory{hasher: h}
} }
func (rf *Factory) Hasher() ifc.KunstructuredHasher { // Hasher returns an ifc.KustHasher
return rf.kf.Hasher() func (rf *Factory) Hasher() ifc.KustHasher {
return rf.hasher
} }
// FromMap returns a new instance of Resource. // FromMap returns a new instance of Resource.
func (rf *Factory) FromMap(m map[string]interface{}) *Resource { func (rf *Factory) FromMap(m map[string]interface{}) *Resource {
return rf.makeOne(rf.kf.FromMap(m), nil) return rf.FromMapAndOption(m, nil)
} }
// FromMapWithName returns a new instance with the given "original" name. // FromMapWithName returns a new instance with the given "original" name.
@ -41,34 +46,30 @@ func (rf *Factory) FromMapWithName(n string, m map[string]interface{}) *Resource
// FromMapWithNamespaceAndName returns a new instance with the given "original" namespace. // FromMapWithNamespaceAndName returns a new instance with the given "original" namespace.
func (rf *Factory) FromMapWithNamespaceAndName(ns string, n string, m map[string]interface{}) *Resource { func (rf *Factory) FromMapWithNamespaceAndName(ns string, n string, m map[string]interface{}) *Resource {
return rf.makeOne(rf.kf.FromMap(m), nil).setPreviousNamespaceAndName(ns, n) r := rf.FromMapAndOption(m, nil)
return r.setPreviousId(ns, n, r.GetKind())
} }
// FromMapAndOption returns a new instance of Resource with given options. // FromMapAndOption returns a new instance of Resource with given options.
func (rf *Factory) FromMapAndOption( func (rf *Factory) FromMapAndOption(
m map[string]interface{}, args *types.GeneratorArgs) *Resource { m map[string]interface{}, args *types.GeneratorArgs) *Resource {
return rf.makeOne(rf.kf.FromMap(m), types.NewGenArgs(args)) n, err := yaml.FromMap(m)
} if err != nil {
// TODO: return err instead of log.
// FromKunstructured returns a new instance of Resource. log.Fatal(err)
func (rf *Factory) FromKunstructured(u ifc.Kunstructured) *Resource { }
return rf.makeOne(u, nil) return rf.makeOne(n, types.NewGenArgs(args))
} }
// makeOne returns a new instance of Resource. // makeOne returns a new instance of Resource.
func (rf *Factory) makeOne( func (rf *Factory) makeOne(rn *yaml.RNode, o *types.GenArgs) *Resource {
u ifc.Kunstructured, o *types.GenArgs) *Resource { if rn == nil {
if u == nil { log.Fatal("RNode must not be null")
log.Fatal("unstruct ifc must not be null")
} }
if o == nil { if o == nil {
o = types.NewGenArgs(nil) o = types.NewGenArgs(nil)
} }
r := &Resource{ return &Resource{node: rn, options: o}
kunStr: u,
options: o,
}
return r
} }
// SliceFromPatches returns a slice of resources given a patch path // SliceFromPatches returns a slice of resources given a patch path
@ -105,47 +106,135 @@ func (rf *Factory) FromBytes(in []byte) (*Resource, error) {
// SliceFromBytes unmarshals bytes into a Resource slice. // SliceFromBytes unmarshals bytes into a Resource slice.
func (rf *Factory) SliceFromBytes(in []byte) ([]*Resource, error) { func (rf *Factory) SliceFromBytes(in []byte) ([]*Resource, error) {
kunStructs, err := rf.kf.SliceFromBytes(in) nodes, err := rf.RNodesFromBytes(in)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var result []*Resource return rf.resourcesFromRNodes(nodes), nil
for len(kunStructs) > 0 { }
u := kunStructs[0]
kunStructs = kunStructs[1:] // ResourcesFromRNodes converts RNodes to Resources.
if strings.HasSuffix(u.GetKind(), "List") { func (rf *Factory) ResourcesFromRNodes(
m, err := u.Map() nodes []*yaml.RNode) (result []*Resource, err error) {
if err != nil { nodes, err = rf.dropBadNodes(nodes)
return nil, err if err != nil {
return nil, err
}
return rf.resourcesFromRNodes(nodes), nil
}
// resourcesFromRNode assumes all nodes are good.
func (rf *Factory) resourcesFromRNodes(
nodes []*yaml.RNode) (result []*Resource) {
for _, n := range nodes {
result = append(result, rf.makeOne(n, nil))
}
return
}
func (rf *Factory) RNodesFromBytes(b []byte) (result []*yaml.RNode, err error) {
nodes, err := kio.FromBytes(b)
if err != nil {
return nil, err
}
nodes, err = rf.dropBadNodes(nodes)
if err != nil {
return nil, err
}
for len(nodes) > 0 {
n0 := nodes[0]
nodes = nodes[1:]
kind := n0.GetKind()
if !strings.HasSuffix(kind, "List") {
result = append(result, n0)
continue
}
// Convert a FooList into a slice of Foo.
var m map[string]interface{}
m, err = n0.Map()
if err != nil {
return nil, err
}
items, ok := m["items"]
if !ok {
// treat as an empty list
continue
}
slice, ok := items.([]interface{})
if !ok {
if items == nil {
// an empty list
continue
} }
items := m["items"] return nil, fmt.Errorf(
itemsSlice, ok := items.([]interface{}) "expected array in %s/items, but found %T", kind, items)
if !ok { }
if items == nil { innerNodes, err := rf.convertObjectSliceToNodeSlice(slice)
// an empty list if err != nil {
continue return nil, err
} }
return nil, fmt.Errorf("items in List is type %T, expected array", items) nodes = append(nodes, innerNodes...)
} }
for _, item := range itemsSlice { return result, nil
itemJSON, err := json.Marshal(item) }
if err != nil {
return nil, err // convertObjectSlice converts a list of objects to a list of RNode.
} func (rf *Factory) convertObjectSliceToNodeSlice(
innerU, err := rf.kf.SliceFromBytes(itemJSON) objects []interface{}) (result []*yaml.RNode, err error) {
if err != nil { var bytes []byte
return nil, err var nodes []*yaml.RNode
} for _, obj := range objects {
// append innerU to kunStructs so nested Lists can be handled bytes, err = json.Marshal(obj)
kunStructs = append(kunStructs, innerU...) if err != nil {
} return
} else { }
result = append(result, rf.FromKunstructured(u)) nodes, err = kio.FromBytes(bytes)
if err != nil {
return
}
nodes, err = rf.dropBadNodes(nodes)
if err != nil {
return
}
result = append(result, nodes...)
}
return
}
// dropBadNodes may drop some nodes from its input argument.
func (rf *Factory) dropBadNodes(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
var result []*yaml.RNode
for _, n := range nodes {
ignore, err := rf.shouldIgnore(n)
if err != nil {
return nil, err
}
if !ignore {
result = append(result, n)
} }
} }
return result, nil return result, nil
} }
// shouldIgnore returns true if there's some reason to ignore the node.
func (rf *Factory) shouldIgnore(n *yaml.RNode) (bool, error) {
if n.IsNilOrEmpty() {
return true, nil
}
md, err := n.GetValidatedMetadata()
if err != nil {
return true, err
}
_, ignore := md.ObjectMeta.Annotations[konfig.IgnoredByKustomizeAnnotation]
if ignore {
return true, nil
}
if foundNil, path := n.HasNilEntryInList(); foundNil {
return true, fmt.Errorf("empty item at %v in object %v", path, n)
}
return false, nil
}
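A short sketch of the List flattening above, not part of this changeset: a FooList document is replaced by its items, and nested lists are handled because the inner nodes are pushed back onto the work queue.

    package main

    import (
        "fmt"

        "sigs.k8s.io/kustomize/api/hasher"
        "sigs.k8s.io/kustomize/api/resource"
    )

    func main() {
        rf := resource.NewFactory(&hasher.Hasher{})
        rs, err := rf.SliceFromBytes([]byte(`
    apiVersion: v1
    kind: ConfigMapList
    items:
    - apiVersion: v1
      kind: ConfigMap
      metadata:
        name: a
    - apiVersion: v1
      kind: ConfigMap
      metadata:
        name: b
    `))
        if err != nil {
            panic(err)
        }
        // The list document itself is dropped; its items come back as resources,
        // so given the flattening logic above this should print 2.
        fmt.Println(len(rs))
    }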
// SliceFromBytesWithNames unmarshals bytes into a Resource slice with specified original
// name.
func (rf *Factory) SliceFromBytesWithNames(names []string, in []byte) ([]*Resource, error) {
@ -157,25 +246,25 @@ func (rf *Factory) SliceFromBytesWithNames(names []string, in []byte) ([]*Resour
return nil, fmt.Errorf("number of names doesn't match number of resources")
}
for i, res := range result {
res.setPreviousId(resid.DefaultNamespace, names[i], res.GetKind())
}
return result, nil
}
// MakeConfigMap makes an instance of Resource for ConfigMap
func (rf *Factory) MakeConfigMap(kvLdr ifc.KvLoader, args *types.ConfigMapArgs) (*Resource, error) {
rn, err := generators.MakeConfigMap(kvLdr, args)
if err != nil {
return nil, err
}
return rf.makeOne(rn, types.NewGenArgs(&args.GeneratorArgs)), nil
}
// MakeSecret makes an instance of Resource for Secret
func (rf *Factory) MakeSecret(kvLdr ifc.KvLoader, args *types.SecretArgs) (*Resource, error) {
rn, err := generators.MakeSecret(kvLdr, args)
if err != nil {
return nil, err
}
return rf.makeOne(rn, types.NewGenArgs(&args.GeneratorArgs)), nil
}


@ -12,153 +12,184 @@ import (
"sigs.k8s.io/kustomize/api/filters/patchstrategicmerge"
"sigs.k8s.io/kustomize/api/ifc"
"sigs.k8s.io/kustomize/api/konfig"
"sigs.k8s.io/kustomize/api/resid"
"sigs.k8s.io/kustomize/api/types"
"sigs.k8s.io/kustomize/kyaml/kio"
kyaml "sigs.k8s.io/kustomize/kyaml/yaml"
"sigs.k8s.io/yaml"
)
// Resource is an RNode, representing a Kubernetes Resource Model object,
// paired with metadata used by kustomize.
// For more history, see sigs.k8s.io/kustomize/api/ifc.Unstructured
type Resource struct {
// TODO: Inline RNode, dropping complexity. Resource is just a decorator.
node *kyaml.RNode
options *types.GenArgs
refBy []resid.ResId
refVarNames []string
}
const (
buildAnnotationPreviousKinds = konfig.ConfigAnnoDomain + "/previousKinds"
buildAnnotationPreviousNames = konfig.ConfigAnnoDomain + "/previousNames"
buildAnnotationPrefixes = konfig.ConfigAnnoDomain + "/prefixes"
buildAnnotationSuffixes = konfig.ConfigAnnoDomain + "/suffixes"
buildAnnotationPreviousNamespaces = konfig.ConfigAnnoDomain + "/previousNamespaces"
// the following are only for patches, to specify whether they can change names
// and kinds of their targets
buildAnnotationAllowNameChange = konfig.ConfigAnnoDomain + "/allowNameChange"
buildAnnotationAllowKindChange = konfig.ConfigAnnoDomain + "/allowKindChange"
)
var buildAnnotations = []string{
buildAnnotationPreviousKinds,
buildAnnotationPreviousNames,
buildAnnotationPrefixes,
buildAnnotationSuffixes,
buildAnnotationPreviousNamespaces,
buildAnnotationAllowNameChange,
buildAnnotationAllowKindChange,
}
func (r *Resource) AsRNode() *kyaml.RNode {
return r.node.Copy()
}
func (r *Resource) ResetPrimaryData(incoming *Resource) {
r.node = incoming.node.Copy()
}
func (r *Resource) GetAnnotations() map[string]string {
annotations, err := r.node.GetAnnotations()
if err != nil || annotations == nil {
return make(map[string]string)
}
return annotations
}
func (r *Resource) GetFieldValue(f string) (interface{}, error) {
//nolint:staticcheck
return r.node.GetFieldValue(f)
}
func (r *Resource) GetDataMap() map[string]string {
return r.node.GetDataMap()
}
func (r *Resource) GetBinaryDataMap() map[string]string {
return r.node.GetBinaryDataMap()
}
func (r *Resource) GetGvk() resid.Gvk {
meta, err := r.node.GetMeta()
if err != nil {
return resid.GvkFromString("")
}
g, v := resid.ParseGroupVersion(meta.APIVersion)
return resid.Gvk{Group: g, Version: v, Kind: meta.Kind}
}
func (r *Resource) Hash(h ifc.KustHasher) (string, error) {
return h.Hash(r.node)
}
func (r *Resource) GetKind() string {
return r.node.GetKind()
}
func (r *Resource) GetLabels() map[string]string {
l, err := r.node.GetLabels()
if err != nil {
return map[string]string{}
}
return l
}
func (r *Resource) GetName() string {
return r.node.GetName()
}
func (r *Resource) GetSlice(p string) ([]interface{}, error) {
//nolint:staticcheck
return r.node.GetSlice(p)
}
func (r *Resource) GetString(p string) (string, error) {
//nolint:staticcheck
return r.node.GetString(p)
}
func (r *Resource) IsEmpty() bool {
return r.node.IsNilOrEmpty()
}
func (r *Resource) Map() (map[string]interface{}, error) {
return r.node.Map()
}
func (r *Resource) MarshalJSON() ([]byte, error) {
return r.node.MarshalJSON()
}
func (r *Resource) MatchesLabelSelector(selector string) (bool, error) {
return r.node.MatchesLabelSelector(selector)
}
func (r *Resource) MatchesAnnotationSelector(selector string) (bool, error) {
return r.node.MatchesAnnotationSelector(selector)
}
func (r *Resource) SetAnnotations(m map[string]string) {
if len(m) == 0 {
// Force field erasure.
r.node.SetAnnotations(nil)
return
}
r.node.SetAnnotations(m)
}
func (r *Resource) SetDataMap(m map[string]string) {
r.node.SetDataMap(m)
}
func (r *Resource) SetBinaryDataMap(m map[string]string) {
r.node.SetBinaryDataMap(m)
}
func (r *Resource) SetGvk(gvk resid.Gvk) {
r.node.SetMapField(
kyaml.NewScalarRNode(gvk.Kind), kyaml.KindField)
r.node.SetMapField(
kyaml.NewScalarRNode(gvk.ApiVersion()), kyaml.APIVersionField)
}
func (r *Resource) SetLabels(m map[string]string) {
if len(m) == 0 {
// Force field erasure.
r.node.SetLabels(nil)
return
}
r.node.SetLabels(m)
}
func (r *Resource) SetName(n string) {
r.node.SetName(n)
}
func (r *Resource) SetNamespace(n string) {
r.node.SetNamespace(n)
}
func (r *Resource) SetKind(k string) {
gvk := r.GetGvk()
gvk.Kind = k
r.SetGvk(gvk)
}
func (r *Resource) UnmarshalJSON(s []byte) error {
return r.node.UnmarshalJSON(s)
}
// ResCtx is an interface describing the contextual added
@ -178,14 +209,14 @@ type ResCtxMatcher func(ResCtx) bool
// DeepCopy returns a new copy of resource
func (r *Resource) DeepCopy() *Resource {
rc := &Resource{
node: r.node.Copy(),
}
rc.copyOtherFields(r)
return rc
}
// CopyMergeMetaDataFields copies everything but the non-metadata in
// the resource.
func (r *Resource) CopyMergeMetaDataFieldsFrom(other *Resource) {
r.SetLabels(mergeStringMaps(other.GetLabels(), r.GetLabels()))
r.SetAnnotations(
@ -251,8 +282,10 @@ func (r *Resource) ReferencesEqual(other *Resource) bool {
return len(setSelf) == len(setOther)
}
// NodeEqual returns true if the resource's nodes are
// equal, ignoring ancillary information like genargs, refby, etc.
func (r *Resource) NodeEqual(o *Resource) bool {
return reflect.DeepEqual(r.node, o.node)
}
func (r *Resource) copyRefBy() []resid.ResId {
@ -351,12 +384,41 @@ func (r *Resource) RemoveBuildAnnotations() {
r.SetAnnotations(annotations)
}
func (r *Resource) setPreviousId(ns string, n string, k string) *Resource {
r.appendCsvAnnotation(buildAnnotationPreviousNames, n)
r.appendCsvAnnotation(buildAnnotationPreviousNamespaces, ns)
r.appendCsvAnnotation(buildAnnotationPreviousKinds, k)
return r
}
func (r *Resource) SetAllowNameChange(value string) {
annotations := r.GetAnnotations()
annotations[buildAnnotationAllowNameChange] = value
r.SetAnnotations(annotations)
}
func (r *Resource) NameChangeAllowed() bool {
annotations := r.GetAnnotations()
if allowed, set := annotations[buildAnnotationAllowNameChange]; set && allowed == "true" {
return true
}
return false
}
func (r *Resource) SetAllowKindChange(value string) {
annotations := r.GetAnnotations()
annotations[buildAnnotationAllowKindChange] = value
r.SetAnnotations(annotations)
}
func (r *Resource) KindChangeAllowed() bool {
annotations := r.GetAnnotations()
if allowed, set := annotations[buildAnnotationAllowKindChange]; set && allowed == "true" {
return true
}
return false
}
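For orientation (not part of the vendored diff), a hedged sketch of how a patch opts into these permissions, which ApplySmPatch further below consults:
// Hypothetical usage: allow a strategic-merge patch to rename and re-kind
// its target; ApplySmPatch then skips restoring the original name and kind.
patch.SetAllowNameChange("true")
patch.SetAllowKindChange("true")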
// String returns resource as JSON.
func (r *Resource) String() string {
bs, err := r.MarshalJSON()
@ -430,14 +492,19 @@ func (r *Resource) PrevIds() []resid.ResId {
// pairs on one annotation so there is no chance of error
names := r.getCsvAnnotation(buildAnnotationPreviousNames)
ns := r.getCsvAnnotation(buildAnnotationPreviousNamespaces)
kinds := r.getCsvAnnotation(buildAnnotationPreviousKinds)
if len(names) != len(ns) || len(names) != len(kinds) {
panic(errors.New(
"number of previous names, " +
"number of previous namespaces, " +
"number of previous kinds not equal"))
}
for i := range names {
k := kinds[i]
gvk := r.GetGvk()
gvk.Kind = k
ids = append(ids, resid.NewResIdWithNamespace(
gvk, names[i], ns[i]))
}
return ids
}
@ -445,7 +512,7 @@ func (r *Resource) PrevIds() []resid.ResId {
// StorePreviousId stores the resource's current ID via build annotations.
func (r *Resource) StorePreviousId() {
id := r.CurId()
r.setPreviousId(id.EffectiveNamespace(), id.Name, id.Kind)
}
// CurId returns a ResId for the resource using the
@ -478,38 +545,35 @@ func (r *Resource) AppendRefVarName(variable types.Var) {
// ApplySmPatch applies the provided strategic merge patch.
func (r *Resource) ApplySmPatch(patch *Resource) error {
n, ns, k := r.GetName(), r.GetNamespace(), r.GetKind()
if patch.NameChangeAllowed() || patch.KindChangeAllowed() {
r.StorePreviousId()
}
if err := r.ApplyFilter(patchstrategicmerge.Filter{
Patch: patch.node,
}); err != nil {
return err
}
if r.IsEmpty() {
return nil
}
if !patch.KindChangeAllowed() {
r.SetKind(k)
}
if !patch.NameChangeAllowed() {
r.SetName(n)
}
r.SetNamespace(ns)
return nil
}
func (r *Resource) ApplyFilter(f kio.Filter) error {
l, err := f.Filter([]*kyaml.RNode{r.node})
if len(l) == 0 {
// The node was deleted. The following makes r.IsEmpty() true.
r.node = nil
}
return err
}
func mergeStringMaps(maps ...map[string]string) map[string]string {


@ -23,7 +23,7 @@ func (e *errUnableToFind) Error() string {
m = append(m, "('"+p.Value+"'; "+p.Key+")")
}
return fmt.Sprintf(
"unable to find %s - tried: %s", e.what, strings.Join(m, ", "))
}
func NewErrUnableToFind(w string, a []Pair) *errUnableToFind {


@ -9,8 +9,7 @@ import (
"sigs.k8s.io/kustomize/api/resid" "sigs.k8s.io/kustomize/api/resid"
) )
// FieldSpec completely specifies a kustomizable field in // FieldSpec completely specifies a kustomizable field in a k8s API object.
// an unstructured representation of a k8s API object.
// It helps define the operands of transformations. // It helps define the operands of transformations.
// //
// For example, a directive to add a common label to objects // For example, a directive to add a common label to objects


@ -3,14 +3,77 @@
package types
type HelmGlobals struct {
// ChartHome is a file path, relative to the kustomization root,
// to a directory containing a subdirectory for each chart to be
// included in the kustomization.
// The default value of this field is "charts".
// So, for example, kustomize looks for the minecraft chart
// at {kustomizationRoot}/{ChartHome}/minecraft.
// If the chart is there at build time, kustomize will use it as found,
// and not check version numbers or dates.
// If the chart is not there, kustomize will attempt to pull it
// using the version number specified in the kustomization file,
// and put it there. To suppress the pull attempt, simply assure
// that the chart is already there.
ChartHome string `json:"chartHome,omitempty" yaml:"chartHome,omitempty"`
// ConfigHome defines a value that kustomize should pass to helm via
// the HELM_CONFIG_HOME environment variable. kustomize doesn't attempt
// to read or write this directory.
// If omitted, {tmpDir}/helm is used, where {tmpDir} is some temporary
// directory created by kustomize for the benefit of helm.
// Likewise, kustomize sets
// HELM_CACHE_HOME={ConfigHome}/.cache
// HELM_DATA_HOME={ConfigHome}/.data
// for the helm subprocess.
ConfigHome string `json:"configHome,omitempty" yaml:"configHome,omitempty"`
}
type HelmChart struct {
// Name is the name of the chart, e.g. 'minecraft'.
Name string `json:"name,omitempty" yaml:"name,omitempty"`
// Version is the version of the chart, e.g. '3.1.3'
Version string `json:"version,omitempty" yaml:"version,omitempty"`
// Repo is a URL locating the chart on the internet.
// This is the argument to helm's `--repo` flag, e.g.
// `https://itzg.github.io/minecraft-server-charts`.
Repo string `json:"repo,omitempty" yaml:"repo,omitempty"`
// ReleaseName replaces RELEASE-NAME in chart template output,
// making a particular inflation of a chart unique with respect to
// other inflations of the same chart in a cluster. It's the first
// argument to the helm `install` and `template` commands, i.e.
// helm install {RELEASE-NAME} {chartName}
// helm template {RELEASE-NAME} {chartName}
// If omitted, the flag --generate-name is passed to 'helm template'.
ReleaseName string `json:"releaseName,omitempty" yaml:"releaseName,omitempty"`
// ValuesFile is local file path to a values file to use _instead of_
// the default values that accompanied the chart.
// The default values are in '{ChartHome}/{Name}/values.yaml'.
ValuesFile string `json:"valuesFile,omitempty" yaml:"valuesFile,omitempty"`
// ValuesInline holds value mappings specified directly,
// rather than in a separate file.
ValuesInline map[string]interface{} `json:"valuesInline,omitempty" yaml:"valuesInline,omitempty"`
// ValuesMerge specifies how to treat ValuesInline with respect to Values.
// Legal values: 'merge', 'override', 'replace'.
// Defaults to 'override'.
ValuesMerge string `json:"valuesMerge,omitempty" yaml:"valuesMerge,omitempty"`
}
// HelmChartArgs contains arguments to helm.
// Deprecated. Use HelmGlobals and HelmChart instead.
type HelmChartArgs struct {
ChartName string `json:"chartName,omitempty" yaml:"chartName,omitempty"`
ChartVersion string `json:"chartVersion,omitempty" yaml:"chartVersion,omitempty"`
ChartRepoURL string `json:"chartRepoUrl,omitempty" yaml:"chartRepoUrl,omitempty"`
ChartHome string `json:"chartHome,omitempty" yaml:"chartHome,omitempty"`
ChartRepoName string `json:"chartRepoName,omitempty" yaml:"chartRepoName,omitempty"`
HelmBin string `json:"helmBin,omitempty" yaml:"helmBin,omitempty"`
HelmHome string `json:"helmHome,omitempty" yaml:"helmHome,omitempty"`
Values string `json:"values,omitempty" yaml:"values,omitempty"`
@ -20,3 +83,32 @@ type HelmChartArgs struct {
ReleaseNamespace string `json:"releaseNamespace,omitempty" yaml:"releaseNamespace,omitempty"`
ExtraArgs []string `json:"extraArgs,omitempty" yaml:"extraArgs,omitempty"`
}
// SplitHelmParameters splits helm parameters into
// per-chart params and global chart-independent parameters.
func SplitHelmParameters(
oldArgs []HelmChartArgs) (charts []HelmChart, globals HelmGlobals) {
for _, old := range oldArgs {
charts = append(charts, makeHelmChartFromHca(&old))
if old.HelmHome != "" {
// last non-empty wins
globals.ConfigHome = old.HelmHome
}
if old.ChartHome != "" {
// last non-empty wins
globals.ChartHome = old.ChartHome
}
}
return charts, globals
}
func makeHelmChartFromHca(old *HelmChartArgs) (c HelmChart) {
c.Name = old.ChartName
c.Version = old.ChartVersion
c.Repo = old.ChartRepoURL
c.ValuesFile = old.Values
c.ValuesInline = old.ValuesLocal
c.ValuesMerge = old.ValuesMerge
c.ReleaseName = old.ReleaseName
return
}
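A hedged usage sketch of the conversion above (not part of the vendored diff; values are illustrative, the repo URL is the one from the HelmChart doc comment): legacy inflation-generator args are split into per-chart settings plus shared globals, where the last non-empty home wins.
charts, globals := SplitHelmParameters([]HelmChartArgs{{
ChartName: "minecraft",
ChartVersion: "3.1.3",
ChartRepoURL: "https://itzg.github.io/minecraft-server-charts",
ChartHome: "charts",
}})
// charts[0].Name == "minecraft", globals.ChartHome == "charts"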


@ -6,6 +6,7 @@ package types
import (
"bytes"
"encoding/json"
"fmt"
"sigs.k8s.io/yaml"
)
@ -46,6 +47,9 @@ type Kustomization struct {
// CommonLabels to add to all objects and selectors.
CommonLabels map[string]string `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"`
// Labels to add to all objects but not selectors.
Labels []Label `json:"labels,omitempty" yaml:"labels,omitempty"`
// CommonAnnotations to add to all objects.
CommonAnnotations map[string]string `json:"commonAnnotations,omitempty" yaml:"commonAnnotations,omitempty"`
@ -125,9 +129,14 @@ type Kustomization struct {
// the map will have a suffix hash generated from its contents.
SecretGenerator []SecretArgs `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty"`
// HelmGlobals contains helm configuration that isn't chart specific.
HelmGlobals *HelmGlobals `json:"helmGlobals,omitempty" yaml:"helmGlobals,omitempty"`
// HelmCharts is a list of helm chart configuration instances.
HelmCharts []HelmChart `json:"helmCharts,omitempty" yaml:"helmCharts,omitempty"`
// HelmChartInflationGenerator is a list of helm chart configurations.
// Deprecated. Auto-converted to HelmGlobals and HelmCharts.
HelmChartInflationGenerator []HelmChartArgs `json:"helmChartInflationGenerator,omitempty" yaml:"helmChartInflationGenerator,omitempty"`
// GeneratorOptions modify behavior of all ConfigMap and Secret generators.
@ -181,15 +190,42 @@ func (k *Kustomization) FixKustomizationPostUnmarshalling() {
k.SecretGenerator[i].EnvSource = ""
}
}
charts, globals := SplitHelmParameters(k.HelmChartInflationGenerator)
if k.HelmGlobals == nil {
if globals.ChartHome != "" || globals.ConfigHome != "" {
k.HelmGlobals = &globals
}
}
k.HelmCharts = append(k.HelmCharts, charts...)
// Wipe it for the fix command.
k.HelmChartInflationGenerator = nil
}
// FixKustomizationPreMarshalling fixes things
// that should occur after the kustomization file
// has been processed.
func (k *Kustomization) FixKustomizationPreMarshalling() error {
// PatchesJson6902 should be under the Patches field.
k.Patches = append(k.Patches, k.PatchesJson6902...)
k.PatchesJson6902 = nil
// this fix is not in FixKustomizationPostUnmarshalling because
// it will break some commands like `create` and `add`. those
// commands depend on 'commonLabels' field
if cl := labelFromCommonLabels(k.CommonLabels); cl != nil {
// check conflicts between commonLabels and labels
for _, l := range k.Labels {
for k := range l.Pairs {
if _, exist := cl.Pairs[k]; exist {
return fmt.Errorf("label name '%s' exists in both commonLabels and labels", k)
}
}
}
k.Labels = append(k.Labels, *cl)
k.CommonLabels = nil
}
return nil
}
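A hedged illustration of the behavior above (not vendored code; names are examples): commonLabels are folded into the newer labels field, with selectors included, when the kustomization is written back out.
k := &Kustomization{CommonLabels: map[string]string{"app": "web"}}
err := k.FixKustomizationPreMarshalling()
// err == nil; k.CommonLabels == nil; k.Labels[0].Pairs["app"] == "web"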
func (k *Kustomization) EnforceFields() []string {

vendor/sigs.k8s.io/kustomize/api/types/labels.go generated vendored Normal file

@ -0,0 +1,25 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package types
type Label struct {
// Pairs contains the key-value pairs for labels to add
Pairs map[string]string `json:"pairs,omitempty" yaml:"pairs,omitempty"`
// IncludeSelectors indicates whether the transformer should include the
// fieldSpecs for selectors. Custom fieldSpecs specified by
// FieldSpecs will be merged with builtin fieldSpecs if this
// is true.
IncludeSelectors bool `json:"includeSelectors,omitempty" yaml:"includeSelectors,omitempty"`
FieldSpecs []FieldSpec `json:"fields,omitempty" yaml:"fields,omitempty"`
}
func labelFromCommonLabels(commonLabels map[string]string) *Label {
if len(commonLabels) == 0 {
return nil
}
return &Label{
Pairs: commonLabels,
IncludeSelectors: true,
}
}


@ -3,6 +3,8 @@
package types
import "reflect"
// Patch represent either a Strategic Merge Patch or a JSON patch
// and its targets.
// The content of the patch can either be from a file
@ -16,6 +18,9 @@ type Patch struct {
// Target points to the resources that the patch is applied to
Target *Selector `json:"target,omitempty" yaml:"target,omitempty"`
// Options is a list of options for the patch
Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"`
}
// Equals return true if p equals o.
@ -24,5 +29,6 @@ func (p *Patch) Equals(o Patch) bool {
(p.Target != nil && o.Target != nil && *p.Target == *o.Target)
return p.Path == o.Path &&
p.Patch == o.Patch &&
targetEqual &&
reflect.DeepEqual(p.Options, o.Options)
}


@ -3,27 +3,13 @@
package types
type HelmConfig struct {
Enabled bool
Command string
}
// PluginConfig holds plugin configuration.
type PluginConfig struct {
// PluginRestrictions distinguishes plugin restrictions.
PluginRestrictions PluginRestrictions
@ -32,4 +18,30 @@ type PluginConfig struct {
// FnpLoadingOptions sets the way function-based plugin behaviors.
FnpLoadingOptions FnPluginLoadingOptions
// HelmConfig contains metadata needed for allowing and running helm.
HelmConfig HelmConfig
}
func EnabledPluginConfig(b BuiltinPluginLoadingOptions) (pc *PluginConfig) {
pc = MakePluginConfig(PluginRestrictionsNone, b)
pc.FnpLoadingOptions.EnableStar = true
pc.HelmConfig.Enabled = true
// If this command is not on PATH, tests needing it should skip.
pc.HelmConfig.Command = "helmV3"
return
}
func DisabledPluginConfig() *PluginConfig {
return MakePluginConfig(
PluginRestrictionsBuiltinsOnly,
BploUseStaticallyLinked)
}
func MakePluginConfig(pr PluginRestrictions,
b BuiltinPluginLoadingOptions) *PluginConfig {
return &PluginConfig{
PluginRestrictions: pr,
BpLoadingOptions: b,
}
}
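A hedged sketch of how these canned configurations differ (illustrative, not vendored code):
pc := DisabledPluginConfig()
// pc.PluginRestrictions == PluginRestrictionsBuiltinsOnly, helm disabled;
// EnabledPluginConfig(BploUseStaticallyLinked).HelmConfig.Enabled == true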


@ -1,27 +1,59 @@
// Copyright 2021 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package types
const DefaultReplacementFieldPath = "metadata.name"
// Replacement defines how to perform a substitution
// where it is from and where it is to.
type Replacement struct {
// The source of the value.
Source *SourceSelector `json:"source" yaml:"source"`
// The N fields to write the value to.
Targets []*TargetSelector `json:"targets" yaml:"targets"`
}
// SourceSelector is the source of the replacement transformer.
type SourceSelector struct {
// A specific object to read it from.
KrmId `json:",inline,omitempty" yaml:",inline,omitempty"`
// Structured field path expected in the allowed object.
FieldPath string `json:"fieldPath" yaml:"fieldPath"`
// Used to refine the interpretation of the field.
Options *FieldOptions `json:"options" yaml:"options"`
}
// TargetSelector specifies fields in one or more objects.
type TargetSelector struct {
// Include objects that match this.
Select *Selector `json:"select" yaml:"select"`
// From the allowed set, remove objects that match this.
Reject []*Selector `json:"reject" yaml:"reject"`
// Structured field paths expected in each allowed object.
FieldPaths []string `json:"fieldPaths" yaml:"fieldPaths"`
// Used to refine the interpretation of the field.
Options *FieldOptions `json:"options" yaml:"options"`
}
// FieldOptions refine the interpretation of FieldPaths.
type FieldOptions struct {
// Used to split/join the field.
Delimiter string `json:"delimiter" yaml:"delimiter"`
// Which position in the split to consider.
Index int `json:"index" yaml:"index"`
// TODO (#3492): Implement use of this option
// None, Base64, URL, Hex, etc
Encoding string `json:"encoding" yaml:"encoding"`
// If field missing, add it.
Create bool `json:"create" yaml:"create"`
}
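A hedged usage sketch of the types above (not vendored code; the object names and field paths are hypothetical): read a value from a ConfigMap field and write it into the image tag of every matching Deployment, splitting on ":".
repl := Replacement{
Source: &SourceSelector{
KrmId: KrmId{Gvk: resid.Gvk{Kind: "ConfigMap"}, Name: "app-config"},
FieldPath: "data.tag",
},
Targets: []*TargetSelector{{
Select: &Selector{KrmId: KrmId{Gvk: resid.Gvk{Kind: "Deployment"}}},
FieldPaths: []string{"spec.template.spec.containers.0.image"},
Options: &FieldOptions{Delimiter: ":", Index: 1},
}},
}
_ = repl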


@ -13,9 +13,8 @@ import (
// Any resource that matches intersection of all conditions
// is included in this set.
type Selector struct {
// KrmId refers to a GVKN/Ns of a resource.
KrmId `json:",inline,omitempty" yaml:",inline,omitempty"`
// AnnotationSelector is a string that follows the label selection expression
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api
@ -28,6 +27,23 @@ type Selector struct {
LabelSelector string `json:"labelSelector,omitempty" yaml:"labelSelector,omitempty"`
}
// KrmId refers to a GVKN/Ns of a resource.
type KrmId struct {
resid.Gvk `json:",inline,omitempty" yaml:",inline,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
}
// Match returns true if id selects other, i.e. id's fields
// either match other's or are empty
func (id *KrmId) Match(other *KrmId) bool {
return (id.Group == "" || id.Group == other.Group) &&
(id.Version == "" || id.Version == other.Version) &&
(id.Kind == "" || id.Kind == other.Kind) &&
(id.Name == "" || id.Name == other.Name) &&
(id.Namespace == "" || id.Namespace == other.Namespace)
}
// SelectorRegex is a Selector with regex in GVK
// Any resource that matches intersection of all conditions
// is included in this set.


@ -12,8 +12,6 @@ import (
"sigs.k8s.io/kustomize/api/resid" "sigs.k8s.io/kustomize/api/resid"
) )
const defaultFieldPath = "metadata.name"
// Var represents a variable whose value will be sourced // Var represents a variable whose value will be sourced
// from a field in a Kubernetes object. // from a field in a Kubernetes object.
type Var struct { type Var struct {
@ -71,7 +69,7 @@ type FieldSelector struct {
// defaulting sets reference to field used by default. // defaulting sets reference to field used by default.
func (v *Var) Defaulting() { func (v *Var) Defaulting() {
if v.FieldRef.FieldPath == "" { if v.FieldRef.FieldPath == "" {
v.FieldRef.FieldPath = defaultFieldPath v.FieldRef.FieldPath = DefaultReplacementFieldPath
} }
v.ObjRef.GVK() v.ObjRef.GVK()
} }

Some files were not shown because too many files have changed in this diff.