mirror of https://github.com/k3s-io/k3s

Update to v1.21.2 (#3479)

* Update to v1.21.2

Signed-off-by: dereknola <derek.nola@suse.com>

Refs: pull/3510/head, v1.21.2+k3s1
parent c35761d169
commit 5a67e8dc47

go.mod (80 lines changed)
@@ -24,7 +24,7 @@ replace (
 	github.com/kubernetes-sigs/cri-tools => github.com/k3s-io/cri-tools v1.21.0-k3s1
 	github.com/matryer/moq => github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009
 	// LOOK TO scripts/download FOR THE VERSION OF runc THAT WE ARE BUILDING/SHIPPING
-	github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc93.0.20210414171415-3397a09ee932
+	github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc95
 	github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.0.3-0.20210316141917-a8c4a9ee0f6b
 	github.com/rancher/k3s/pkg/data => ./pkg/data
 	go.etcd.io/etcd => github.com/k3s-io/etcd v0.5.0-alpha.5.0.20201208200253-50621aee4aea
@@ -34,34 +34,34 @@ replace (
 	google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884
 	google.golang.org/grpc => google.golang.org/grpc v1.27.1
 	gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
-	k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1
-	k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1
-	k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1
-	k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1
-	k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1
-	k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1
-	k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1
-	k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1
-	k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1
-	k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1
-	k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1
-	k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1
-	k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1
-	k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1
-	k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1
-	k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1
-	k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1
-	k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1
-	k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1
-	k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1
-	k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.21.1-k3s1
-	k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1
-	k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1
-	k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1
-	k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.21.1-k3s1
-	k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.1-k3s1
-	k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.21.1-k3s1
-	k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.21.1-k3s1
+	k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.2-k3s1
+	k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.2-k3s1
+	k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.2-k3s1
+	k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.2-k3s1
+	k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.2-k3s1
+	k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.2-k3s1
+	k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.2-k3s1
+	k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.2-k3s1
+	k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.2-k3s1
+	k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.2-k3s1
+	k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.2-k3s1
+	k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.2-k3s1
+	k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.2-k3s1
+	k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.2-k3s1
+	k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.2-k3s1
+	k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.2-k3s1
+	k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.2-k3s1
+	k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.2-k3s1
+	k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.2-k3s1
+	k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.2-k3s1
+	k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.21.2-k3s1
+	k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.2-k3s1
+	k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.2-k3s1
+	k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.2-k3s1
+	k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.21.2-k3s1
+	k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.2-k3s1
+	k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.21.2-k3s1
+	k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.21.2-k3s1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7
 )
@@ -94,7 +94,7 @@ require (
 	github.com/moby/sys/symlink v0.1.0 // indirect
 	github.com/natefinch/lumberjack v2.0.0+incompatible
 	// LOOK TO scripts/download FOR THE VERSION OF runc THAT WE ARE BUILDING/SHIPPING
-	github.com/opencontainers/runc v1.0.0-rc93
+	github.com/opencontainers/runc v1.0.0-rc95
 	github.com/opencontainers/selinux v1.8.0
 	github.com/pierrec/lz4 v2.6.0+incompatible
 	github.com/pkg/errors v0.9.1
@@ -113,20 +113,20 @@ require (
 	go.etcd.io/etcd v0.5.0-alpha.5.0.20201208200253-50621aee4aea
 	golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
 	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
-	golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073
+	golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
 	google.golang.org/grpc v1.37.0
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.21.1
-	k8s.io/apimachinery v0.21.1
-	k8s.io/apiserver v0.21.1
+	k8s.io/api v0.21.2
+	k8s.io/apimachinery v0.21.2
+	k8s.io/apiserver v0.21.2
 	k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
-	k8s.io/cloud-provider v0.21.1
-	k8s.io/component-base v0.21.1
-	k8s.io/controller-manager v0.21.1
-	k8s.io/cri-api v0.21.1
+	k8s.io/cloud-provider v0.21.2
+	k8s.io/component-base v0.21.2
+	k8s.io/controller-manager v0.21.2
+	k8s.io/cri-api v0.21.2
 	k8s.io/klog v1.0.0
-	k8s.io/kubectl v0.21.1
-	k8s.io/kubernetes v1.21.1
+	k8s.io/kubectl v0.21.2
+	k8s.io/kubernetes v1.21.2
 	k8s.io/utils v0.0.0-20201110183641-67b214c5f920
 	sigs.k8s.io/yaml v1.2.0
 )
go.sum (106 lines changed)
@@ -544,55 +544,55 @@ github.com/k3s-io/helm-controller v0.9.1 h1:qtHWTNHiuCPRbA2YZ7z7jTgSHo7Yc5He52oM
 github.com/k3s-io/helm-controller v0.9.1/go.mod h1:nZP8FH3KZrNNUf5r+SwwiMR63HS6lxdHdpHijgPfF74=
 github.com/k3s-io/kine v0.6.0 h1:4l7wjgCxb2oD+7Hyf3xIhkGd/6s1sXpRFdQiyy+7Ki8=
 github.com/k3s-io/kine v0.6.0/go.mod h1:rzCs93+rQHZGOiewMd84PDrER92QeZ6eeHbWkfEy4+w=
-github.com/k3s-io/kubernetes v1.21.1-k3s1 h1:X8nEv12/bI3iR2+ARLuzvosPW8iMOisMlklOAeovISw=
-github.com/k3s-io/kubernetes v1.21.1-k3s1/go.mod h1:ef++isEL1PW0taH6z7DXrSztPglrZ7jQhyvcMEtm0gQ=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1 h1:7iwn62FGlOqG9oRHwTY/+AbFlSZffWMqx6WUXjRpQPk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1/go.mod h1:DKjoC7WTLvupppdmb5jEvRDPQENLZqz/stEUs19TOOc=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1 h1:xx/SYlAfrliD+w2REUCBMFtQvATof0YitCHpswviL8s=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1/go.mod h1:z4ndT0jW6BSHky3MjKfpX8hfxFiOobduUEPsG67DW+o=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1 h1:Ehl7DzKcuQqcT1lqNgx8+E2WFfvcnxyFTR408v6/zcE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1/go.mod h1:nNWKgJz7U8Te/QwaBeDSCEsvmYwqE+lIJtSthnQTluo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1 h1:fHAKfXieH13JHdEIstbWmDsy32d7Lr8jmxZgbMy4D4M=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1/go.mod h1:55vGov1alDiAFatappea/yL2CFIuYfSpQmw8xdZNCxw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1 h1:iFah9rWBxlUgEE1nrmpOhrMI+vDArwTR6yfL3lS75PU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1/go.mod h1:4vy7xXlS9QNceWCoFBkdxsp50ToEpoM5TjtRKTRxyF0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1 h1:LiLc5dwo9eWPCyro6s31zKB/m2mWX8QSU7u+UpIu3O0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1/go.mod h1:dQ/jkraN2wXzFTYGG3wlzs/YappHsx2F/PxtQ9KyDZY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1 h1:AnNUwMVdRRjOqOmNbN05yxoTNH6Gp4YZsDLDqSSXrpo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1/go.mod h1:7Q3crE6C30z+md2412q3PcSR2P2NIv1WxHH5ug8LVmY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1 h1:S961JPII6QbT1j0wAFnRv2EKeRiDZYsCSMkAPReZ4MY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1/go.mod h1:yvPZprzrt0uOuPx/Tkg3zCSdulxPWxWU2nznGYKmMVk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1 h1:6zVRXrGwZu2G5IN3jjftM9R5tJ++2ou4rWBlkcK+ivo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1/go.mod h1:zeT/sAA82/kLPoYfV1Xc1x0PjZlRvqCsrgRZj0Q/OTc=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1 h1:97KjbtEom5IvBTbGktGOEFogPx+RruR2LJ61Oq7HpMI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1/go.mod h1:+aA0FBzPTZJn0I03PDY6xs4stHFP1CGYPNmcAkgDvX4=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1 h1:EZV6LHZ1Xy96bGhpdonZWMDqJ3vsJO+G4og3Y2bhwFw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1/go.mod h1:W/TBpRMx7t6+ZimgebLr7be4Pfb7JvWpeer+kPlA6eI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1 h1:uG7fW+0DgGvKvS+QHlsUUCiplS7TpkKApxqa8kMgQZc=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1/go.mod h1:SnaQa8bOBayBORIZwicYBm9QrlwUPi2PKlMQOhI6HAU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1 h1:A9dgWvKNTG99Dv+wk+9kMYiRR3n8r/O8YbRowznaqvE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1/go.mod h1:9zdUgM3lMAO5+g35Vyq3OQdMrylvtV97G7t5emDYCOs=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1 h1:ZA9oCLCAQ+QsT4CBsfmoAZC8OpvX3aREqgrmcHdsYDU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1/go.mod h1:NmtKDopOnphD2IlcH9OjoxoI4mEkkgGhVw7dTbdBTY0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1 h1:Jq6Ix8pq5N6YXhFk9dRsogpr824gsow/31vIia1FLLU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1/go.mod h1:KhCAvsyM1iGJLSql7cTjBXwRzigyfTwRcu1unidRJmo=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1 h1:7Dls9SYhW2caBAgvO/d+bDsnbnWn5oryOCldyTi/48g=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1/go.mod h1:46iKO45TZat/zvPyqe8TjLLrTS/U/nGB92Ft63PEPF0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1 h1:1AzewTobcKns6rupjf6ZcNduUbA2saJbRdlj0Xk6A8M=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1/go.mod h1:6mEp02ABsuOeeBuUrrol78v9LYysX7Z8CZOMFlkPOOI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1 h1:orhYkSYqltNEiiwzBZHbi5eiis5NGfAWUHKg/0XBVdk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1/go.mod h1:8YcoZs+nmcq5YEBZXOkUtuLpYpPoZWC7I3CAUOvZGNc=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1 h1:8vP+SyaEgSZjGKLYhwp8//9XwpYYhtLjL2+eLBKs+No=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1/go.mod h1:ZRzv7towE/rnhoSlglnClSdwvG956r8BtWsV8GriLCw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1 h1:xxDhyVqo40oFL+QRnE7Aq3f4YOmp39hsWs3CG3h/BJA=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1/go.mod h1:o8/6oYd5NojfXqZcgzwRY6/N9H0txmvDbs4Sk6Laz0A=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1 h1:VNow0MZPpjsdJ4VTVAMvguvUtEM9VRBzcZCepwpqLEQ=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1/go.mod h1:Fqw7RNHll7guUATPVDLSfOKr2ayRlFGMI3SMrxo5ECU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1 h1:XkxQNzMRtm5HG8N3m6P6NVcEiTOae6zyQ1T7aZ1m3aM=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1/go.mod h1:/SknInvlq+Fm+vrO/Z7JYHjxwIsxAl32mAI9tUH/lGY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1 h1:KxvblpY/4qohv46MZBJtWxwY8u9YSqQnNXinl1d7DGM=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1/go.mod h1:99KFJSKqMgMvpCWJr4w6ooLZgR+2usWp5GPaILFNq9k=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.1-k3s1/go.mod h1:WkDfrpYVRWS0Muw8Vg5XicfVdTxnLvgiT8tX8DSD0Zo=
+github.com/k3s-io/kubernetes v1.21.2-k3s1 h1:e5R77BWsEEiZqoZ68t9iKF08KwSVBqASwy4N9Uq4FHw=
+github.com/k3s-io/kubernetes v1.21.2-k3s1/go.mod h1:HevHCwYnT2nf/6w8I+b2tpz1NvzJmHZ9nOjh9ng7Rwg=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.2-k3s1 h1:CAhRjFSPftQWJXLDtuP9y2bpOhTrGtCvfa+oN2iNY7k=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.2-k3s1/go.mod h1:DKjoC7WTLvupppdmb5jEvRDPQENLZqz/stEUs19TOOc=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.2-k3s1 h1:kKGNORHw4IhjH4C867kb/CCMynNLZVk7+V4I/Ep2W8s=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.2-k3s1/go.mod h1:zQKYFndsHewFINNGXUVknGznYOgTQ0c4IQi4DrS0ta8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.2-k3s1 h1:7b3+T6bvbPajPCrHO4RJZf0JUFmWJ96Wg2eoQqs28aY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.2-k3s1/go.mod h1:AgbIgqlxGq0YtOvWIc73AIwKvgS5yfJEbyBdhrkXV/c=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.2-k3s1 h1:aPLokNjj6C81f9dbxd6euZVy7sD406OqN9nSClQK4JE=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.2-k3s1/go.mod h1:PM0TmITj+YSPTwQ8YixbMg6UixqKNeg/UJBsp98N0Zs=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.2-k3s1 h1:Ixs9vfj0WVQIuUCMIAISEeiZtdkuw5W9m0TlbOvR6Fc=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.2-k3s1/go.mod h1:4vy7xXlS9QNceWCoFBkdxsp50ToEpoM5TjtRKTRxyF0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.2-k3s1 h1:LfxsT//pqw/Y4oa1D7zv8tDP91nFMNAjtuua8EexVQU=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.2-k3s1/go.mod h1:22bxxqEB5R54skz0yl4etUWTqEz3816+sKF3fv7ZTek=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.2-k3s1 h1:We9VVPP3ZGefujiroYhcn2TSCoW96kNU/KP4zVjIh2E=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.2-k3s1/go.mod h1:LOQtDqZAF6SU/+dQ4bDbsIWiPQ1dGx7EsvMq96pXa/k=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.2-k3s1 h1:yuWnFqpqovdnz6hCYXXbCSnJD982gkM+2dBS5neUX7s=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.2-k3s1/go.mod h1:yvPZprzrt0uOuPx/Tkg3zCSdulxPWxWU2nznGYKmMVk=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.2-k3s1 h1:SBOFiFHswuSnSc5FTml+BuUkPI6WN6XnjLhrDvT4c0Q=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.2-k3s1/go.mod h1:+6+s/UlgjNARtpIEWBMruqmdEhMHuiaSQ2iiKi9x5HE=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.2-k3s1 h1:gW/cF4VkgB4B8dGiOMXeW7cuWyaI/Ud2VGhy/xP+2yY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.2-k3s1/go.mod h1:nx6pwxjFc3YpuvLHXrXu10vFTdNuyJ6bmRrNYdF2Z68=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.2-k3s1 h1:FAFxg4+AJ8hXSy+R+F6kQbEeFZcWciqg2pBsnbtA2A8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.2-k3s1/go.mod h1:kutkhetwPn05JbZkAtXBuIm5+cHbrBa1loHlO540yFk=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.2-k3s1 h1:qwHnNP8E+nCQokz3txbWMxDDvevcb6HRmGWw7ixmVyo=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.2-k3s1/go.mod h1:SnaQa8bOBayBORIZwicYBm9QrlwUPi2PKlMQOhI6HAU=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.2-k3s1 h1:bLCZxD07GSTzem1+YdFoDJe6KEE8JhIuVFZxS4f7kCM=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.2-k3s1/go.mod h1:8a9+wxOscdSWUhL3k9ZL59Q/DmUJ0wlOAMalnNTNDSs=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.2-k3s1 h1:OWchsI+Z7luDNd28NGzETUF5scxdKiOWPfX2aEIP6EY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.2-k3s1/go.mod h1:NmtKDopOnphD2IlcH9OjoxoI4mEkkgGhVw7dTbdBTY0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.2-k3s1 h1:JItxfsk2yY2ZuUiHIQS2gyXwufNIFD60ateSHrCcIRQ=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.2-k3s1/go.mod h1:KhCAvsyM1iGJLSql7cTjBXwRzigyfTwRcu1unidRJmo=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.2-k3s1 h1:ucmL5/OKiJqWS4schPSsdKqIwQediubfiET8/LX15uo=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.2-k3s1/go.mod h1:46iKO45TZat/zvPyqe8TjLLrTS/U/nGB92Ft63PEPF0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.2-k3s1 h1:P4s3B0AF/BWvje40sI5fbycsbT7JJdMIIKNqo3A7YI8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.2-k3s1/go.mod h1:6mEp02ABsuOeeBuUrrol78v9LYysX7Z8CZOMFlkPOOI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.2-k3s1 h1:iYitLmIxiZrw2AVcSOAhSyReSwSJXInqXbxmFfe2ltY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.2-k3s1/go.mod h1:2YTC4iC4FdyjWdoteA+Z3lDuwrH+6Cm1W59SS0RzioE=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.2-k3s1 h1:nX8q82vxSDmdOEuoH3CMYJp5dNPqgM5Zzin+KsjjzlQ=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.2-k3s1/go.mod h1:9wq1Uz9l9XWtJKigYn/xv8z7yhVpfQh76/aPw27fxKM=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.2-k3s1 h1:YsRUmhXR6qgfjmLmzoGf/Esagy4trQ5qdQT5JpFcSB0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.2-k3s1/go.mod h1:o8/6oYd5NojfXqZcgzwRY6/N9H0txmvDbs4Sk6Laz0A=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.2-k3s1 h1:Th9PQxHb586fhQRnGl868ySYX5Z/6TtNO/iXsVFq9kI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.2-k3s1/go.mod h1:J/qZeQTPahToXaMJSjdjrJek4VAporM+4/62jAxvRLI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.2-k3s1 h1:kdbAcIjUFUg97TIzrLlZPRB9odLzb7URCLnIeQJqNgQ=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.2-k3s1/go.mod h1:/SknInvlq+Fm+vrO/Z7JYHjxwIsxAl32mAI9tUH/lGY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.2-k3s1 h1:pXmTCdMGR4bEedygDmnSsvkEasHwcyEGHab+VTnFVw4=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.2-k3s1/go.mod h1:99KFJSKqMgMvpCWJr4w6ooLZgR+2usWp5GPaILFNq9k=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.2-k3s1/go.mod h1:WkDfrpYVRWS0Muw8Vg5XicfVdTxnLvgiT8tX8DSD0Zo=
 github.com/k3s-io/protobuf v1.4.3-k3s1 h1:gduXrSm/6KkbTuctP6bASYqKQ/tyC/PNYqxBmJnk4Tc=
 github.com/k3s-io/protobuf v1.4.3-k3s1/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
@@ -770,8 +770,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v1.0.0-rc93.0.20210414171415-3397a09ee932 h1:Gw8DJ5twfTK8WGYuxdCjZg25SbXFKnSb52eeToqsQX8=
-github.com/opencontainers/runc v1.0.0-rc93.0.20210414171415-3397a09ee932/go.mod h1:0nk5nDuj73fOIE5fbIxhfP94BUmFvSeQZVbKXW2WsPY=
+github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0=
+github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM=
 github.com/opencontainers/runtime-spec v1.0.3-0.20210316141917-a8c4a9ee0f6b h1:ZDY8P/luqXqGJSNCux8+9GeKmBDS+JVgVuIwKTauiwM=
 github.com/opencontainers/runtime-spec v1.0.3-0.20210316141917-a8c4a9ee0f6b/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
@@ -1256,8 +1256,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19 h1:0jaDAAxtqIrrqas4vtTqxct4xS5kHfRNycTRLTyJmVM=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE=
 sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY=
 sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0=
@@ -1,5 +1,5 @@
-ARG GO_VERSION=1.15
-ARG BATS_VERSION=v1.2.1
+ARG GO_VERSION=1.16
+ARG BATS_VERSION=v1.3.0

 FROM golang:${GO_VERSION}-buster
 ARG DEBIAN_FRONTEND=noninteractive
@@ -0,0 +1,11 @@
+## Emeritus ##
+
+We would like to acknowledge previous runc maintainers and their huge
+contributions to our collective success:
+
+ * Alexander Morozov (@lk4d4)
+ * Andrei Vagin (@avagin)
+ * Rohit Jnagal (@rjnagal)
+ * Victor Marmol (@vmarmol)
+
+We thank these members for their service to the OCI community.
@@ -1,5 +1,5 @@
 CONTAINER_ENGINE := docker
-GO := go
+GO ?= go

 PREFIX ?= /usr/local
 BINDIR := $(PREFIX)/sbin
@@ -14,6 +14,10 @@ COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true)
 COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),"$(COMMIT_NO)-dirty","$(COMMIT_NO)")
 VERSION := $(shell cat ./VERSION)

+# TODO: rm -mod=vendor once go 1.13 is unsupported
+ifneq ($(GO111MODULE),off)
+	MOD_VENDOR := "-mod=vendor"
+endif
 ifeq ($(shell $(GO) env GOOS),linux)
 	ifeq (,$(filter $(shell $(GO) env GOARCH),mips mipsle mips64 mips64le ppc64))
 		ifeq (,$(findstring -race,$(EXTRA_FLAGS)))
@@ -21,9 +25,9 @@ ifeq ($(shell $(GO) env GOOS),linux)
 		endif
 	endif
 endif
-GO_BUILD := $(GO) build -trimpath $(GO_BUILDMODE) $(EXTRA_FLAGS) -tags "$(BUILDTAGS)" \
+GO_BUILD := $(GO) build -trimpath $(MOD_VENDOR) $(GO_BUILDMODE) $(EXTRA_FLAGS) -tags "$(BUILDTAGS)" \
 	-ldflags "-X main.gitCommit=$(COMMIT) -X main.version=$(VERSION) $(EXTRA_LDFLAGS)"
-GO_BUILD_STATIC := CGO_ENABLED=1 $(GO) build -trimpath $(EXTRA_FLAGS) -tags "$(BUILDTAGS) netgo osusergo" \
+GO_BUILD_STATIC := CGO_ENABLED=1 $(GO) build -trimpath $(MOD_VENDOR) $(EXTRA_FLAGS) -tags "$(BUILDTAGS) netgo osusergo" \
 	-ldflags "-w -extldflags -static -X main.gitCommit=$(COMMIT) -X main.version=$(VERSION) $(EXTRA_LDFLAGS)"

 .DEFAULT: runc
@@ -70,7 +74,7 @@ unittest: runcimage
 		$(RUNC_IMAGE) make localunittest TESTFLAGS=$(TESTFLAGS)

 localunittest: all
-	$(GO) test -timeout 3m -tags "$(BUILDTAGS)" $(TESTFLAGS) -v ./...
+	$(GO) test $(MOD_VENDOR) -timeout 3m -tags "$(BUILDTAGS)" $(TESTFLAGS) -v ./...

 integration: runcimage
 	$(CONTAINER_ENGINE) run $(CONTAINER_ENGINE_RUN_FLAGS) \
@@ -28,7 +28,7 @@ A third party security audit was performed by Cure53, you can see the full repor
 ## Building

 `runc` currently supports the Linux platform with various architecture support.
-It must be built with Go version 1.14 or higher.
+It must be built with Go version 1.13 or higher.

 In order to enable seccomp support you will need to install `libseccomp` on your platform.
 > e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu
@@ -1 +1 @@
-1.0.0-rc93+dev
+1.0.0-rc95
@ -15,8 +15,8 @@ Vagrant.configure("2") do |config|
|
|||
set -e -u -o pipefail
|
||||
|
||||
# configuration
|
||||
GO_VERSION="1.15"
|
||||
BATS_VERSION="v1.2.1"
|
||||
GO_VERSION="1.16.4"
|
||||
BATS_VERSION="v1.3.0"
|
||||
|
||||
# install yum packages
|
||||
yum install -y -q epel-release
|
||||
|
|
|
@@ -3,7 +3,7 @@

 Vagrant.configure("2") do |config|
   # Fedora box is used for testing cgroup v2 support
-  config.vm.box = "fedora/33-cloud-base"
+  config.vm.box = "fedora/34-cloud-base"
   config.vm.provider :virtualbox do |v|
     v.memory = 2048
     v.cpus = 2
@@ -41,8 +41,8 @@ EOF
     cat > /etc/systemd/system/user@.service.d/delegate.conf << EOF
 [Service]
-# default: Delegate=pids memory
-# NOTE: delegation of cpuset requires systemd >= 244 (Fedora >= 32, Ubuntu >= 20.04). cpuset is ignored on Fedora 31.
-Delegate=cpu cpuset io memory pids
+# NOTE: delegation of cpuset requires systemd >= 244 (Fedora >= 32, Ubuntu >= 20.04).
+Delegate=yes
 EOF
     systemctl daemon-reload
   SHELL
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"net"
+	"os"
 	"path/filepath"
 	"strconv"

 	criu "github.com/checkpoint-restore/go-criu/v5/rpc"
@@ -78,12 +79,37 @@ checkpointed.`,
 	},
 }

-func getCheckpointImagePath(context *cli.Context) string {
+func prepareImagePaths(context *cli.Context) (string, string, error) {
 	imagePath := context.String("image-path")
 	if imagePath == "" {
 		imagePath = getDefaultImagePath(context)
 	}
-	return imagePath
+
+	if err := os.MkdirAll(imagePath, 0600); err != nil {
+		return "", "", err
+	}
+
+	parentPath := context.String("parent-path")
+	if parentPath == "" {
+		return imagePath, parentPath, nil
+	}
+
+	if filepath.IsAbs(parentPath) {
+		return "", "", errors.New("--parent-path must be relative")
+	}
+
+	realParent := filepath.Join(imagePath, parentPath)
+	fi, err := os.Stat(realParent)
+	if err == nil && !fi.IsDir() {
+		err = &os.PathError{Path: realParent, Err: unix.ENOTDIR}
+	}
+
+	if err != nil {
+		return "", "", fmt.Errorf("invalid --parent-path: %w", err)
+	}
+
+	return imagePath, parentPath, nil
 }

 func setPageServer(context *cli.Context, options *libcontainer.CriuOpts) {
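The new prepareImagePaths consolidates the checkpoint image-path handling: it creates the image directory up front and validates that --parent-path is a relative path naming a directory under the image path, which is what CRIU expects for incremental checkpoints. A standalone sketch of the same validation idea (the helper name and error wording below are illustrative, not runc's API):

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// validateParentPath mirrors the check above: an incremental parent
// checkpoint must be a directory nested (relatively) under imagePath.
func validateParentPath(imagePath, parentPath string) error {
	if parentPath == "" {
		return nil // no incremental parent requested
	}
	if filepath.IsAbs(parentPath) {
		return errors.New("parent path must be relative to the image path")
	}
	fi, err := os.Stat(filepath.Join(imagePath, parentPath))
	if err != nil {
		return fmt.Errorf("invalid parent path: %w", err)
	}
	if !fi.IsDir() {
		return fmt.Errorf("invalid parent path: %q is not a directory", parentPath)
	}
	return nil
}

func main() {
	// An absolute parent path is rejected before any filesystem access.
	fmt.Println(validateParentPath(os.TempDir(), "/absolute"))
}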
@@ -1,6 +1,6 @@
 module github.com/opencontainers/runc

-go 1.14
+go 1.13

 require (
 	github.com/checkpoint-restore/go-criu/v5 v5.0.0
@@ -23,6 +23,6 @@ require (
 	github.com/vishvananda/netlink v1.1.0
 	github.com/willf/bitset v1.1.11
 	golang.org/x/net v0.0.0-20201224014010-6772e930b67b
-	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
+	golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
 	google.golang.org/protobuf v1.25.0
 )
@@ -105,6 +105,8 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -7,37 +7,44 @@ import (
 )

 type Manager interface {
-	// Applies cgroup configuration to the process with the specified pid
+	// Apply creates a cgroup, if not yet created, and adds a process
+	// with the specified pid into that cgroup. A special value of -1
+	// can be used to merely create a cgroup.
 	Apply(pid int) error

-	// Returns the PIDs inside the cgroup set
+	// GetPids returns the PIDs of all processes inside the cgroup.
 	GetPids() ([]int, error)

-	// Returns the PIDs inside the cgroup set & all sub-cgroups
+	// GetAllPids returns the PIDs of all processes inside the cgroup
+	// any all its sub-cgroups.
 	GetAllPids() ([]int, error)

-	// Returns statistics for the cgroup set
+	// GetStats returns cgroups statistics.
 	GetStats() (*Stats, error)

-	// Toggles the freezer cgroup according with specified state
+	// Freeze sets the freezer cgroup to the specified state.
 	Freeze(state configs.FreezerState) error

-	// Destroys the cgroup set
+	// Destroy removes cgroup.
 	Destroy() error

 	// Path returns a cgroup path to the specified controller/subsystem.
 	// For cgroupv2, the argument is unused and can be empty.
 	Path(string) string

-	// Sets the cgroup as configured.
-	Set(container *configs.Config) error
+	// Set sets cgroup resources parameters/limits. If the argument is nil,
+	// the resources specified during Manager creation (or the previous call
+	// to Set) are used.
+	Set(r *configs.Resources) error

-	// GetPaths returns cgroup path(s) to save in a state file in order to restore later.
+	// GetPaths returns cgroup path(s) to save in a state file in order to
+	// restore later.
 	//
-	// For cgroup v1, a key is cgroup subsystem name, and the value is the path
-	// to the cgroup for this subsystem.
+	// For cgroup v1, a key is cgroup subsystem name, and the value is the
+	// path to the cgroup for this subsystem.
 	//
-	// For cgroup v2 unified hierarchy, a key is "", and the value is the unified path.
+	// For cgroup v2 unified hierarchy, a key is "", and the value is the
+	// unified path.
 	GetPaths() map[string]string

 	// GetCgroups returns the cgroup data as configured.
@@ -46,7 +53,7 @@ type Manager interface {
 	// GetFreezerState retrieves the current FreezerState of the cgroup.
 	GetFreezerState() (configs.FreezerState, error)

-	// Whether the cgroup path exists or not
+	// Exists returns whether the cgroup path exists or not.
 	Exists() bool

 	// OOMKillCount reports OOM kill count for the cgroup.
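The signature change from Set(container *configs.Config) to Set(r *configs.Resources) means callers hand the manager only the resource limits rather than the whole container configuration, and a nil argument re-applies the previously configured resources. A compile-only sketch of the new calling convention from a consumer of this interface (the helper below is hypothetical, not part of runc):

package main

import (
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/configs"
)

// updatePidsLimit is a hypothetical caller. Under the old interface the
// same update required passing the full *configs.Config; now only the
// Resources struct crosses the Manager boundary.
func updatePidsLimit(mgr cgroups.Manager, cfg *configs.Config, limit int64) error {
	cfg.Cgroups.Resources.PidsLimit = limit
	return mgr.Set(cfg.Cgroups.Resources)
}

// A real caller would obtain mgr from the libcontainer cgroup manager
// constructors; this sketch only illustrates the argument change.
func main() {}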
@@ -25,19 +25,19 @@ func (s *BlkioGroup) Apply(path string, d *cgroupData) error {
 	return join(path, d.pid)
 }

-func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
-	if cgroup.Resources.BlkioWeight != 0 {
-		if err := fscommon.WriteFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil {
+func (s *BlkioGroup) Set(path string, r *configs.Resources) error {
+	if r.BlkioWeight != 0 {
+		if err := fscommon.WriteFile(path, "blkio.weight", strconv.FormatUint(uint64(r.BlkioWeight), 10)); err != nil {
 			return err
 		}
 	}

-	if cgroup.Resources.BlkioLeafWeight != 0 {
-		if err := fscommon.WriteFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil {
+	if r.BlkioLeafWeight != 0 {
+		if err := fscommon.WriteFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(r.BlkioLeafWeight), 10)); err != nil {
 			return err
 		}
 	}
-	for _, wd := range cgroup.Resources.BlkioWeightDevice {
+	for _, wd := range r.BlkioWeightDevice {
 		if err := fscommon.WriteFile(path, "blkio.weight_device", wd.WeightString()); err != nil {
 			return err
 		}
@@ -45,22 +45,22 @@ func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
 			return err
 		}
 	}
-	for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice {
+	for _, td := range r.BlkioThrottleReadBpsDevice {
 		if err := fscommon.WriteFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
 			return err
 		}
 	}
-	for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice {
+	for _, td := range r.BlkioThrottleWriteBpsDevice {
 		if err := fscommon.WriteFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
 			return err
 		}
 	}
-	for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice {
+	for _, td := range r.BlkioThrottleReadIOPSDevice {
 		if err := fscommon.WriteFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
 			return err
 		}
 	}
-	for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice {
+	for _, td := range r.BlkioThrottleWriteIOPSDevice {
 		if err := fscommon.WriteFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
 			return err
 		}
@@ -32,7 +32,7 @@ func (s *CpuGroup) Apply(path string, d *cgroupData) error {
 	// We should set the real-Time group scheduling settings before moving
 	// in the process because if the process is already in SCHED_RR mode
 	// and no RT bandwidth is set, adding it will fail.
-	if err := s.SetRtSched(path, d.config); err != nil {
+	if err := s.SetRtSched(path, d.config.Resources); err != nil {
 		return err
 	}
 	// Since we are not using join(), we need to place the pid
@@ -40,23 +40,23 @@ func (s *CpuGroup) Apply(path string, d *cgroupData) error {
 	return cgroups.WriteCgroupProc(path, d.pid)
 }

-func (s *CpuGroup) SetRtSched(path string, cgroup *configs.Cgroup) error {
-	if cgroup.Resources.CpuRtPeriod != 0 {
-		if err := fscommon.WriteFile(path, "cpu.rt_period_us", strconv.FormatUint(cgroup.Resources.CpuRtPeriod, 10)); err != nil {
+func (s *CpuGroup) SetRtSched(path string, r *configs.Resources) error {
+	if r.CpuRtPeriod != 0 {
+		if err := fscommon.WriteFile(path, "cpu.rt_period_us", strconv.FormatUint(r.CpuRtPeriod, 10)); err != nil {
 			return err
 		}
 	}
-	if cgroup.Resources.CpuRtRuntime != 0 {
-		if err := fscommon.WriteFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil {
+	if r.CpuRtRuntime != 0 {
+		if err := fscommon.WriteFile(path, "cpu.rt_runtime_us", strconv.FormatInt(r.CpuRtRuntime, 10)); err != nil {
 			return err
 		}
 	}
 	return nil
 }

-func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error {
-	if cgroup.Resources.CpuShares != 0 {
-		shares := cgroup.Resources.CpuShares
+func (s *CpuGroup) Set(path string, r *configs.Resources) error {
+	if r.CpuShares != 0 {
+		shares := r.CpuShares
 		if err := fscommon.WriteFile(path, "cpu.shares", strconv.FormatUint(shares, 10)); err != nil {
 			return err
 		}
@@ -72,17 +72,17 @@ func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error {
 			return fmt.Errorf("the minimum allowed cpu-shares is %d", sharesRead)
 		}
 	}
-	if cgroup.Resources.CpuPeriod != 0 {
-		if err := fscommon.WriteFile(path, "cpu.cfs_period_us", strconv.FormatUint(cgroup.Resources.CpuPeriod, 10)); err != nil {
+	if r.CpuPeriod != 0 {
+		if err := fscommon.WriteFile(path, "cpu.cfs_period_us", strconv.FormatUint(r.CpuPeriod, 10)); err != nil {
 			return err
 		}
 	}
-	if cgroup.Resources.CpuQuota != 0 {
-		if err := fscommon.WriteFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.Resources.CpuQuota, 10)); err != nil {
+	if r.CpuQuota != 0 {
+		if err := fscommon.WriteFile(path, "cpu.cfs_quota_us", strconv.FormatInt(r.CpuQuota, 10)); err != nil {
 			return err
 		}
 	}
-	return s.SetRtSched(path, cgroup)
+	return s.SetRtSched(path, r)
 }

 func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
@@ -43,7 +43,7 @@ func (s *CpuacctGroup) Apply(path string, d *cgroupData) error {
 	return join(path, d.pid)
 }

-func (s *CpuacctGroup) Set(path string, cgroup *configs.Cgroup) error {
+func (s *CpuacctGroup) Set(_ string, _ *configs.Resources) error {
 	return nil
 }
@@ -24,17 +24,17 @@ func (s *CpusetGroup) Name() string {
 }

 func (s *CpusetGroup) Apply(path string, d *cgroupData) error {
-	return s.ApplyDir(path, d.config, d.pid)
+	return s.ApplyDir(path, d.config.Resources, d.pid)
 }

-func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {
-	if cgroup.Resources.CpusetCpus != "" {
-		if err := fscommon.WriteFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil {
+func (s *CpusetGroup) Set(path string, r *configs.Resources) error {
+	if r.CpusetCpus != "" {
+		if err := fscommon.WriteFile(path, "cpuset.cpus", r.CpusetCpus); err != nil {
 			return err
 		}
 	}
-	if cgroup.Resources.CpusetMems != "" {
-		if err := fscommon.WriteFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil {
+	if r.CpusetMems != "" {
+		if err := fscommon.WriteFile(path, "cpuset.mems", r.CpusetMems); err != nil {
 			return err
 		}
 	}
@@ -144,7 +144,7 @@ func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
 	return nil
 }

-func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
+func (s *CpusetGroup) ApplyDir(dir string, r *configs.Resources, pid int) error {
 	// This might happen if we have no cpuset cgroup mounted.
 	// Just do nothing and don't fail.
 	if dir == "" {
@@ -166,7 +166,7 @@ func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) erro
 	// specified configs, otherwise, inherit from parent. This makes
 	// cpuset configs work correctly with 'cpuset.cpu_exclusive', and
 	// keep backward compatibility.
-	if err := s.ensureCpusAndMems(dir, cgroup); err != nil {
+	if err := s.ensureCpusAndMems(dir, r); err != nil {
 		return err
 	}

@@ -241,8 +241,8 @@ func isEmptyCpuset(str string) bool {
 	return str == "" || str == "\n"
 }

-func (s *CpusetGroup) ensureCpusAndMems(path string, cgroup *configs.Cgroup) error {
-	if err := s.Set(path, cgroup); err != nil {
+func (s *CpusetGroup) ensureCpusAndMems(path string, r *configs.Resources) error {
+	if err := s.Set(path, r); err != nil {
 		return err
 	}
 	return cpusetCopyIfNeeded(path, filepath.Dir(path))
@@ -54,8 +54,8 @@ func buildEmulator(rules []*devices.Rule) (*cgroupdevices.Emulator, error) {
 	return emu, nil
 }

-func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
-	if userns.RunningInUserNS() || cgroup.SkipDevices {
+func (s *DevicesGroup) Set(path string, r *configs.Resources) error {
+	if userns.RunningInUserNS() || r.SkipDevices {
 		return nil
 	}

@@ -65,7 +65,7 @@ func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
 	if err != nil {
 		return err
 	}
-	target, err := buildEmulator(cgroup.Resources.Devices)
+	target, err := buildEmulator(r.Devices)
 	if err != nil {
 		return err
 	}
@@ -27,27 +27,47 @@ func (s *FreezerGroup) Apply(path string, d *cgroupData) error {
 	return join(path, d.pid)
 }

-func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
-	switch cgroup.Resources.Freezer {
+func (s *FreezerGroup) Set(path string, r *configs.Resources) (Err error) {
+	switch r.Freezer {
 	case configs.Frozen:
+		defer func() {
+			if Err != nil {
+				// Freezing failed, and it is bad and dangerous
+				// to leave the cgroup in FROZEN or FREEZING
+				// state, so (try to) thaw it back.
+				_ = fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
+			}
+		}()
+
 		// As per older kernel docs (freezer-subsystem.txt before
 		// kernel commit ef9fe980c6fcc1821), if FREEZING is seen,
 		// userspace should either retry or thaw. While current
 		// kernel cgroup v1 docs no longer mention a need to retry,
-		// the kernel (tested on v5.4, Ubuntu 20.04) can't reliably
-		// freeze a cgroup while new processes keep appearing in it
+		// even a recent kernel (v5.4, Ubuntu 20.04) can't reliably
+		// freeze a cgroup v1 while new processes keep appearing in it
 		// (either via fork/clone or by writing new PIDs to
 		// cgroup.procs).
 		//
-		// The numbers below are chosen to have a decent chance to
-		// succeed even in the worst case scenario (runc pause/unpause
-		// with parallel runc exec).
+		// The numbers below are empirically chosen to have a decent
+		// chance to succeed in various scenarios ("runc pause/unpause
+		// with parallel runc exec" and "bare freeze/unfreeze on a very
+		// slow system"), tested on RHEL7 and Ubuntu 20.04 kernels.
 		//
 		// Adding any amount of sleep in between retries did not
-		// increase the chances of successful freeze.
+		// increase the chances of successful freeze in "pause/unpause
+		// with parallel exec" reproducer. OTOH, adding an occasional
+		// sleep helped for the case where the system is extremely slow
+		// (CentOS 7 VM on GHA CI).
+		//
+		// Alas, this is still a game of chances, since the real fix
+		// belong to the kernel (cgroup v2 do not have this bug).

 		for i := 0; i < 1000; i++ {
 			if i%50 == 49 {
-				// Briefly thawing the cgroup also helps.
+				// Occasional thaw and sleep improves
+				// the chances to succeed in freezing
+				// in case new processes keep appearing
+				// in the cgroup.
 				_ = fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
+				time.Sleep(10 * time.Millisecond)
 			}
@@ -56,6 +76,13 @@ func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
 				return err
 			}

+			if i%25 == 24 {
+				// Occasional short sleep before reading
+				// the state back also improves the chances to
+				// succeed in freezing in case of a very slow
+				// system.
+				time.Sleep(10 * time.Microsecond)
+			}
 			state, err := fscommon.ReadFile(path, "freezer.state")
 			if err != nil {
 				return err
@@ -75,16 +102,13 @@ func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
 			}
 		}
-		// Despite our best efforts, it got stuck in FREEZING.
-		// Leaving it in this state is bad and dangerous, so
-		// let's (try to) thaw it back and error out.
-		_ = fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
+
 		return errors.New("unable to freeze")
 	case configs.Thawed:
 		return fscommon.WriteFile(path, "freezer.state", string(configs.Thawed))
 	case configs.Undefined:
 		return nil
 	default:
-		return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer))
+		return fmt.Errorf("Invalid argument '%s' to freezer.state", string(r.Freezer))
 	}
 }

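These comments document a genuine cgroup v1 limitation: a freeze request can wedge in the intermediate FREEZING state while new processes keep entering the cgroup, so the code retries in a bounded loop, occasionally thaws and sleeps, and on failure the named-return defer thaws the cgroup before erroring out. A stripped-down sketch of that retry pattern against a raw freezer directory (plain file I/O standing in for runc's internal helpers; not runc code):

package main

import (
	"errors"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// freezeV1 retries a cgroup v1 freeze, mirroring the bounded-retry,
// occasional-thaw pattern above. cgDir is a freezer cgroup directory.
func freezeV1(cgDir string) (Err error) {
	state := filepath.Join(cgDir, "freezer.state")
	defer func() {
		if Err != nil {
			// Never leave the cgroup stuck in FROZEN/FREEZING.
			_ = os.WriteFile(state, []byte("THAWED"), 0o644)
		}
	}()
	for i := 0; i < 1000; i++ {
		if i%50 == 49 {
			// Occasionally thaw and back off so racing forks can settle.
			_ = os.WriteFile(state, []byte("THAWED"), 0o644)
			time.Sleep(10 * time.Millisecond)
		}
		if err := os.WriteFile(state, []byte("FROZEN"), 0o644); err != nil {
			return err
		}
		got, err := os.ReadFile(state)
		if err != nil {
			return err
		}
		if strings.TrimSpace(string(got)) == "FROZEN" {
			return nil
		}
	}
	return errors.New("unable to freeze")
}

func main() {}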
@@ -44,8 +44,8 @@ type subsystem interface {
 	GetStats(path string, stats *cgroups.Stats) error
 	// Creates and joins the cgroup represented by 'cgroupData'.
 	Apply(path string, c *cgroupData) error
-	// Set the cgroup represented by cgroup.
-	Set(path string, cgroup *configs.Cgroup) error
+	// Set sets the cgroup resources.
+	Set(path string, r *configs.Resources) error
 }

 type manager struct {
@@ -274,8 +274,8 @@ func (m *manager) GetStats() (*cgroups.Stats, error) {
 	return stats, nil
 }

-func (m *manager) Set(container *configs.Config) error {
-	if container.Cgroups == nil {
+func (m *manager) Set(r *configs.Resources) error {
+	if r == nil {
 		return nil
 	}

@@ -284,7 +284,7 @@ func (m *manager) Set(container *configs.Config) error {
 	if m.cgroups != nil && m.cgroups.Paths != nil {
 		return nil
 	}
-	if container.Cgroups.Resources.Unified != nil {
+	if r.Unified != nil {
 		return cgroups.ErrV1NoUnified
 	}

@@ -292,11 +292,11 @@ func (m *manager) Set(container *configs.Config) error {
 	defer m.mu.Unlock()
 	for _, sys := range subsystems {
 		path := m.paths[sys.Name()]
-		if err := sys.Set(path, container.Cgroups); err != nil {
+		if err := sys.Set(path, r); err != nil {
 			if m.rootless && sys.Name() == "devices" {
 				continue
 			}
-			// When m.Rootless is true, errors from the device subsystem are ignored because it is really not expected to work.
+			// When m.rootless is true, errors from the device subsystem are ignored because it is really not expected to work.
 			// However, errors from other subsystems are not ignored.
 			// see @test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error"
 			if path == "" {
@@ -322,7 +322,7 @@ func (m *manager) Freeze(state configs.FreezerState) error {
 	prevState := m.cgroups.Resources.Freezer
 	m.cgroups.Resources.Freezer = state
 	freezer := &FreezerGroup{}
-	if err := freezer.Set(path, m.cgroups); err != nil {
+	if err := freezer.Set(path, m.cgroups.Resources); err != nil {
 		m.cgroups.Resources.Freezer = prevState
 		return err
 	}
@@ -428,5 +428,11 @@ func OOMKillCount(path string) (uint64, error) {
 }

 func (m *manager) OOMKillCount() (uint64, error) {
-	return OOMKillCount(m.Path("memory"))
+	c, err := OOMKillCount(m.Path("memory"))
+	// Ignore ENOENT when rootless as it couldn't create cgroup.
+	if err != nil && m.rootless && os.IsNotExist(err) {
+		err = nil
+	}
+
+	return c, err
 }
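The OOMKillCount change makes rootless mode tolerant of a missing memory cgroup: if the cgroup was never created, ENOENT is downgraded to a zero count instead of an error. The same tolerate-ENOENT idiom in isolation (file name and helper below are purely illustrative):

package main

import (
	"fmt"
	"os"
)

// readOptionalCounter returns 0 instead of an error when the backing
// file does not exist, the same way the rootless OOMKillCount path
// downgrades ENOENT to a zero result.
func readOptionalCounter(path string, rootless bool) (int, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		if rootless && os.IsNotExist(err) {
			return 0, nil // cgroup was never created; nothing to count
		}
		return 0, err
	}
	var n int
	_, err = fmt.Sscanf(string(data), "%d", &n)
	return n, err
}

func main() {
	n, err := readOptionalCounter("/nonexistent/oom_kill", true)
	fmt.Println(n, err) // 0 <nil>
}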
@@ -22,8 +22,8 @@ func (s *HugetlbGroup) Apply(path string, d *cgroupData) error {
 	return join(path, d.pid)
 }

-func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
-	for _, hugetlb := range cgroup.Resources.HugetlbLimit {
+func (s *HugetlbGroup) Set(path string, r *configs.Resources) error {
+	for _, hugetlb := range r.HugetlbLimit {
 		if err := fscommon.WriteFile(path, "hugetlb."+hugetlb.Pagesize+".limit_in_bytes", strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
 			return err
 		}
@@ -110,32 +110,32 @@ func setMemoryAndSwap(path string, r *configs.Resources) error {
 	return nil
 }

-func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
-	if err := setMemoryAndSwap(path, cgroup.Resources); err != nil {
+func (s *MemoryGroup) Set(path string, r *configs.Resources) error {
+	if err := setMemoryAndSwap(path, r); err != nil {
 		return err
 	}

 	// ignore KernelMemory and KernelMemoryTCP

-	if cgroup.Resources.MemoryReservation != 0 {
-		if err := fscommon.WriteFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil {
+	if r.MemoryReservation != 0 {
+		if err := fscommon.WriteFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(r.MemoryReservation, 10)); err != nil {
 			return err
 		}
 	}

-	if cgroup.Resources.OomKillDisable {
+	if r.OomKillDisable {
 		if err := fscommon.WriteFile(path, "memory.oom_control", "1"); err != nil {
 			return err
 		}
 	}
-	if cgroup.Resources.MemorySwappiness == nil || int64(*cgroup.Resources.MemorySwappiness) == -1 {
+	if r.MemorySwappiness == nil || int64(*r.MemorySwappiness) == -1 {
 		return nil
-	} else if *cgroup.Resources.MemorySwappiness <= 100 {
-		if err := fscommon.WriteFile(path, "memory.swappiness", strconv.FormatUint(*cgroup.Resources.MemorySwappiness, 10)); err != nil {
+	} else if *r.MemorySwappiness <= 100 {
+		if err := fscommon.WriteFile(path, "memory.swappiness", strconv.FormatUint(*r.MemorySwappiness, 10)); err != nil {
 			return err
 		}
 	} else {
-		return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", *cgroup.Resources.MemorySwappiness)
+		return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", *r.MemorySwappiness)
 	}

 	return nil
@@ -224,7 +224,9 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) {

 	value, err := fscommon.GetCgroupParamUint(path, usage)
 	if err != nil {
-		if moduleName != "memory" && os.IsNotExist(err) {
+		if name != "" && os.IsNotExist(err) {
+			// Ignore ENOENT as swap and kmem controllers
+			// are optional in the kernel.
 			return cgroups.MemoryData{}, nil
 		}
 		return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err)
@@ -232,25 +234,16 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) {
 	memoryData.Usage = value
 	value, err = fscommon.GetCgroupParamUint(path, maxUsage)
 	if err != nil {
-		if moduleName != "memory" && os.IsNotExist(err) {
-			return cgroups.MemoryData{}, nil
-		}
 		return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err)
 	}
 	memoryData.MaxUsage = value
 	value, err = fscommon.GetCgroupParamUint(path, failcnt)
 	if err != nil {
-		if moduleName != "memory" && os.IsNotExist(err) {
-			return cgroups.MemoryData{}, nil
-		}
 		return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
 	}
 	memoryData.Failcnt = value
 	value, err = fscommon.GetCgroupParamUint(path, limit)
 	if err != nil {
-		if moduleName != "memory" && os.IsNotExist(err) {
-			return cgroups.MemoryData{}, nil
-		}
 		return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err)
 	}
 	memoryData.Limit = value
@@ -24,7 +24,7 @@ func (s *NameGroup) Apply(path string, d *cgroupData) error {
 	return nil
 }

-func (s *NameGroup) Set(path string, cgroup *configs.Cgroup) error {
+func (s *NameGroup) Set(_ string, _ *configs.Resources) error {
 	return nil
 }
@@ -21,9 +21,9 @@ func (s *NetClsGroup) Apply(path string, d *cgroupData) error {
 	return join(path, d.pid)
 }

-func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error {
-	if cgroup.Resources.NetClsClassid != 0 {
-		if err := fscommon.WriteFile(path, "net_cls.classid", strconv.FormatUint(uint64(cgroup.Resources.NetClsClassid), 10)); err != nil {
+func (s *NetClsGroup) Set(path string, r *configs.Resources) error {
+	if r.NetClsClassid != 0 {
+		if err := fscommon.WriteFile(path, "net_cls.classid", strconv.FormatUint(uint64(r.NetClsClassid), 10)); err != nil {
 			return err
 		}
 	}
@@ -19,8 +19,8 @@ func (s *NetPrioGroup) Apply(path string, d *cgroupData) error {
 	return join(path, d.pid)
 }

-func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error {
-	for _, prioMap := range cgroup.Resources.NetPrioIfpriomap {
+func (s *NetPrioGroup) Set(path string, r *configs.Resources) error {
+	for _, prioMap := range r.NetPrioIfpriomap {
 		if err := fscommon.WriteFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil {
 			return err
 		}
@@ -18,7 +18,7 @@ func (s *PerfEventGroup) Apply(path string, d *cgroupData) error {
 	return join(path, d.pid)
 }

-func (s *PerfEventGroup) Set(path string, cgroup *configs.Cgroup) error {
+func (s *PerfEventGroup) Set(_ string, _ *configs.Resources) error {
 	return nil
 }
@@ -23,13 +23,13 @@ func (s *PidsGroup) Apply(path string, d *cgroupData) error {
 	return join(path, d.pid)
 }
 
-func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {
-	if cgroup.Resources.PidsLimit != 0 {
+func (s *PidsGroup) Set(path string, r *configs.Resources) error {
+	if r.PidsLimit != 0 {
 		// "max" is the fallback value.
 		limit := "max"
 
-		if cgroup.Resources.PidsLimit > 0 {
-			limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10)
+		if r.PidsLimit > 0 {
+			limit = strconv.FormatInt(r.PidsLimit, 10)
 		}
 
 		if err := fscommon.WriteFile(path, "pids.max", limit); err != nil {
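The six hunks above all make the same mechanical change: every cgroup v1 subsystem's Set now receives *configs.Resources directly instead of the whole *configs.Cgroup. A minimal sketch of the refactored calling convention; the Resources and pidsGroup types here are toy stand-ins, not the real runc structs:

package main

import "fmt"

// Resources mirrors the shape of runc's configs.Resources for this sketch;
// the real struct lives in libcontainer/configs and has many more fields.
type Resources struct {
	PidsLimit int64
}

// subsystem matches the refactored v1 interface: Set takes the resources
// struct directly instead of the whole cgroup config.
type subsystem interface {
	Name() string
	Set(path string, r *Resources) error
}

// pidsGroup is a toy stand-in for fs.PidsGroup.
type pidsGroup struct{}

func (pidsGroup) Name() string { return "pids" }
func (pidsGroup) Set(path string, r *Resources) error {
	if r.PidsLimit == 0 {
		return nil // nothing requested; leave pids.max alone
	}
	limit := "max" // negative limit means unlimited
	if r.PidsLimit > 0 {
		limit = fmt.Sprint(r.PidsLimit)
	}
	fmt.Printf("would write %q to %s/pids.max\n", limit, path)
	return nil
}

func main() {
	var s subsystem = pidsGroup{}
	// Callers now pass cgroup.Resources once, instead of threading the
	// full Cgroup config through every subsystem.
	_ = s.Set("/sys/fs/cgroup/pids/mycontainer", &Resources{PidsLimit: 100})
}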
@@ -12,15 +12,14 @@ import (
 	"github.com/opencontainers/runc/libcontainer/configs"
 )
 
-func isCpuSet(cgroup *configs.Cgroup) bool {
-	return cgroup.Resources.CpuWeight != 0 || cgroup.Resources.CpuQuota != 0 || cgroup.Resources.CpuPeriod != 0
+func isCpuSet(r *configs.Resources) bool {
+	return r.CpuWeight != 0 || r.CpuQuota != 0 || r.CpuPeriod != 0
 }
 
-func setCpu(dirPath string, cgroup *configs.Cgroup) error {
-	if !isCpuSet(cgroup) {
+func setCpu(dirPath string, r *configs.Resources) error {
+	if !isCpuSet(r) {
 		return nil
 	}
-	r := cgroup.Resources
 
 	// NOTE: .CpuShares is not used here. Conversion is the caller's responsibility.
 	if r.CpuWeight != 0 {
@@ -70,6 +69,15 @@ func statCpu(dirPath string, stats *cgroups.Stats) error {
 
 		case "system_usec":
 			stats.CpuStats.CpuUsage.UsageInKernelmode = v * 1000
+
+		case "nr_periods":
+			stats.CpuStats.ThrottlingData.Periods = v
+
+		case "nr_throttled":
+			stats.CpuStats.ThrottlingData.ThrottledPeriods = v
+
+		case "throttled_usec":
+			stats.CpuStats.ThrottlingData.ThrottledTime = v * 1000
 		}
 	}
 	return nil
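For context on the statCpu hunk: cpu.stat is a flat "key value" file, and the *_usec fields are microseconds, which runc scales to nanoseconds (the "v * 1000" above). A standalone sketch of that parse; parseCpuStat is a hypothetical helper, not runc API:

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// parseCpuStat reads cpu.stat-style content into a map, converting the
// microsecond fields to nanoseconds the way statCpu does.
func parseCpuStat(content string) (map[string]uint64, error) {
	stats := make(map[string]uint64)
	sc := bufio.NewScanner(strings.NewReader(content))
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) != 2 {
			continue // skip malformed lines
		}
		v, err := strconv.ParseUint(fields[1], 10, 64)
		if err != nil {
			return nil, err
		}
		if strings.HasSuffix(fields[0], "_usec") {
			v *= 1000 // store nanoseconds, as runc's stats structs expect
		}
		stats[fields[0]] = v
	}
	return stats, sc.Err()
}

func main() {
	sample := "usage_usec 5000\nnr_periods 4\nnr_throttled 1\nthrottled_usec 250\n"
	st, _ := parseCpuStat(sample)
	fmt.Println(st["usage_usec"], st["nr_periods"], st["nr_throttled"], st["throttled_usec"])
	// prints: 5000000 4 1 250000
}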
@@ -7,22 +7,22 @@ import (
 	"github.com/opencontainers/runc/libcontainer/configs"
 )
 
-func isCpusetSet(cgroup *configs.Cgroup) bool {
-	return cgroup.Resources.CpusetCpus != "" || cgroup.Resources.CpusetMems != ""
+func isCpusetSet(r *configs.Resources) bool {
+	return r.CpusetCpus != "" || r.CpusetMems != ""
 }
 
-func setCpuset(dirPath string, cgroup *configs.Cgroup) error {
-	if !isCpusetSet(cgroup) {
+func setCpuset(dirPath string, r *configs.Resources) error {
+	if !isCpusetSet(r) {
 		return nil
 	}
 
-	if cgroup.Resources.CpusetCpus != "" {
-		if err := fscommon.WriteFile(dirPath, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil {
+	if r.CpusetCpus != "" {
+		if err := fscommon.WriteFile(dirPath, "cpuset.cpus", r.CpusetCpus); err != nil {
 			return err
 		}
 	}
-	if cgroup.Resources.CpusetMems != "" {
-		if err := fscommon.WriteFile(dirPath, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil {
+	if r.CpusetMems != "" {
+		if err := fscommon.WriteFile(dirPath, "cpuset.mems", r.CpusetMems); err != nil {
 			return err
 		}
 	}
@@ -10,7 +10,7 @@ import (
 	"github.com/opencontainers/runc/libcontainer/configs"
 )
 
-func supportedControllers(cgroup *configs.Cgroup) (string, error) {
+func supportedControllers() (string, error) {
 	return fscommon.ReadFile(UnifiedMountpoint, "/cgroup.controllers")
 }
 
@@ -18,13 +18,13 @@ func supportedControllers(cgroup *configs.Cgroup) (string, error) {
 // based on (1) controllers available and (2) resources that are being set.
 // We don't check "pseudo" controllers such as
 // "freezer" and "devices".
-func needAnyControllers(cgroup *configs.Cgroup) (bool, error) {
-	if cgroup == nil {
+func needAnyControllers(r *configs.Resources) (bool, error) {
+	if r == nil {
 		return false, nil
 	}
 
 	// list of all available controllers
-	content, err := supportedControllers(cgroup)
+	content, err := supportedControllers()
 	if err != nil {
 		return false, err
 	}
@@ -39,22 +39,22 @@ func needAnyControllers(cgroup *configs.Cgroup) (bool, error) {
 		return ok
 	}
 
-	if isPidsSet(cgroup) && have("pids") {
+	if isPidsSet(r) && have("pids") {
 		return true, nil
 	}
-	if isMemorySet(cgroup) && have("memory") {
+	if isMemorySet(r) && have("memory") {
 		return true, nil
 	}
-	if isIoSet(cgroup) && have("io") {
+	if isIoSet(r) && have("io") {
 		return true, nil
 	}
-	if isCpuSet(cgroup) && have("cpu") {
+	if isCpuSet(r) && have("cpu") {
 		return true, nil
 	}
-	if isCpusetSet(cgroup) && have("cpuset") {
+	if isCpusetSet(r) && have("cpuset") {
 		return true, nil
 	}
-	if isHugeTlbSet(cgroup) && have("hugetlb") {
+	if isHugeTlbSet(r) && have("hugetlb") {
 		return true, nil
 	}
 
@@ -64,8 +64,8 @@ func needAnyControllers(cgroup *configs.Cgroup) (bool, error) {
 // containsDomainController returns whether the current config contains domain controller or not.
 // Refer to: http://man7.org/linux/man-pages/man7/cgroups.7.html
 // As at Linux 4.19, the following controllers are threaded: cpu, perf_event, and pids.
-func containsDomainController(cg *configs.Cgroup) bool {
-	return isMemorySet(cg) || isIoSet(cg) || isCpuSet(cg) || isHugeTlbSet(cg)
+func containsDomainController(r *configs.Resources) bool {
+	return isMemorySet(r) || isIoSet(r) || isCpuSet(r) || isHugeTlbSet(r)
 }
 
 // CreateCgroupPath creates cgroupv2 path, enabling all the supported controllers.
@@ -74,7 +74,7 @@ func CreateCgroupPath(path string, c *configs.Cgroup) (Err error) {
 		return fmt.Errorf("invalid cgroup path %s", path)
 	}
 
-	content, err := supportedControllers(c)
+	content, err := supportedControllers()
 	if err != nil {
 		return err
 	}
@@ -115,7 +115,7 @@ func CreateCgroupPath(path string, c *configs.Cgroup) (Err error) {
 	// the controllers requested are thread-aware we can simply put the cgroup into
 	// threaded mode.
 	case "domain invalid":
-		if containsDomainController(c) {
+		if containsDomainController(c.Resources) {
 			return fmt.Errorf("cannot enter cgroupv2 %q with domain controllers -- it is in an invalid state", current)
 		} else {
 			// Not entirely correct (in theory we'd always want to be a domain --
@@ -129,7 +129,7 @@ func CreateCgroupPath(path string, c *configs.Cgroup) (Err error) {
 	case "domain threaded":
 		fallthrough
 	case "threaded":
-		if containsDomainController(c) {
+		if containsDomainController(c.Resources) {
 			return fmt.Errorf("cannot enter cgroupv2 %q with domain controllers -- it is in %s mode", current, cgType)
 		}
 	}
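supportedControllers() drops its argument above because it only ever reads the root cgroup.controllers file. A sketch of that read plus the have() membership test used by needAnyControllers(); it assumes a cgroup v2 host with the unified hierarchy mounted at /sys/fs/cgroup:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// The root cgroup.controllers file is a space-separated list such as
	// "cpuset cpu io memory hugetlb pids".
	content, err := os.ReadFile("/sys/fs/cgroup/cgroup.controllers")
	if err != nil {
		fmt.Println("not a cgroup v2 host:", err)
		return
	}
	avail := make(map[string]struct{})
	for _, c := range strings.Fields(string(content)) {
		avail[c] = struct{}{}
	}
	// Equivalent of the have() closure in needAnyControllers().
	for _, want := range []string{"pids", "memory", "io", "cpu", "cpuset", "hugetlb"} {
		_, ok := avail[want]
		fmt.Printf("%-7s available=%v\n", want, ok)
	}
}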
@@ -30,7 +30,7 @@ func isRWM(perms devices.Permissions) bool {
 
 // This is similar to the logic applied in crun for handling errors from bpf(2)
 // <https://github.com/containers/crun/blob/0.17/src/libcrun/cgroup.c#L2438-L2470>.
-func canSkipEBPFError(cgroup *configs.Cgroup) bool {
+func canSkipEBPFError(r *configs.Resources) bool {
 	// If we're running in a user namespace we can ignore eBPF rules because we
 	// usually cannot use bpf(2), as well as rootless containers usually don't
 	// have the necessary privileges to mknod(2) device inodes or access
@@ -46,7 +46,7 @@ func canSkipEBPFError(cgroup *configs.Cgroup) bool {
 	// NOTE: This will sometimes trigger in cases where access modes are split
 	// between different rules but to handle this correctly would require
 	// using ".../libcontainer/cgroup/devices".Emulator.
-	for _, dev := range cgroup.Resources.Devices {
+	for _, dev := range r.Devices {
 		if !dev.Allow || !isRWM(dev.Permissions) {
 			return false
 		}
@@ -54,15 +54,14 @@ func canSkipEBPFError(cgroup *configs.Cgroup) bool {
 	return true
 }
 
-func setDevices(dirPath string, cgroup *configs.Cgroup) error {
-	if cgroup.SkipDevices {
+func setDevices(dirPath string, r *configs.Resources) error {
+	if r.SkipDevices {
 		return nil
 	}
 	// XXX: This is currently a white-list (but all callers pass a blacklist of
 	// devices). This is bad for a whole variety of reasons, but will need
 	// to be fixed with co-ordinated effort with downstreams.
-	devices := cgroup.Devices
-	insts, license, err := devicefilter.DeviceFilter(devices)
+	insts, license, err := devicefilter.DeviceFilter(r.Devices)
 	if err != nil {
 		return err
 	}
@@ -83,7 +82,7 @@ func setDevices(dirPath string, cgroup *configs.Cgroup) error {
 	// programs. You could temporarily insert a deny-everything program
 	// but that would result in spurrious failures during updates.
 	if _, err := ebpf.LoadAttachCgroupDeviceFilter(insts, license, dirFD); err != nil {
-		if !canSkipEBPFError(cgroup) {
+		if !canSkipEBPFError(r) {
 			return err
 		}
 	}
@@ -75,7 +75,7 @@ func (m *manager) Apply(pid int) error {
 	// - "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error"
 	if m.rootless {
 		if m.config.Path == "" {
-			if blNeed, nErr := needAnyControllers(m.config); nErr == nil && !blNeed {
+			if blNeed, nErr := needAnyControllers(m.config.Resources); nErr == nil && !blNeed {
 				return nil
 			}
 			return errors.Wrap(err, "rootless needs no limits + no cgrouppath when no permission is granted for cgroups")
@@ -103,43 +103,27 @@ func (m *manager) GetStats() (*cgroups.Stats, error) {
 	)
 
 	st := cgroups.NewStats()
-	if err := m.getControllers(); err != nil {
-		return st, err
-	}
 
 	// pids (since kernel 4.5)
-	if _, ok := m.controllers["pids"]; ok {
-		if err := statPids(m.dirPath, st); err != nil {
-			errs = append(errs, err)
-		}
-	} else {
-		if err := statPidsWithoutController(m.dirPath, st); err != nil {
-			errs = append(errs, err)
-		}
+	if err := statPids(m.dirPath, st); err != nil {
+		errs = append(errs, err)
 	}
 	// memory (since kernel 4.5)
-	if _, ok := m.controllers["memory"]; ok {
-		if err := statMemory(m.dirPath, st); err != nil {
-			errs = append(errs, err)
-		}
+	if err := statMemory(m.dirPath, st); err != nil && !os.IsNotExist(err) {
+		errs = append(errs, err)
 	}
 	// io (since kernel 4.5)
-	if _, ok := m.controllers["io"]; ok {
-		if err := statIo(m.dirPath, st); err != nil {
-			errs = append(errs, err)
-		}
+	if err := statIo(m.dirPath, st); err != nil && !os.IsNotExist(err) {
		errs = append(errs, err)
 	}
 	// cpu (since kernel 4.15)
-	if _, ok := m.controllers["cpu"]; ok {
-		if err := statCpu(m.dirPath, st); err != nil {
-			errs = append(errs, err)
-		}
+	// Note cpu.stat is available even if the controller is not enabled.
+	if err := statCpu(m.dirPath, st); err != nil && !os.IsNotExist(err) {
+		errs = append(errs, err)
 	}
 	// hugetlb (since kernel 5.6)
-	if _, ok := m.controllers["hugetlb"]; ok {
-		if err := statHugeTlb(m.dirPath, st); err != nil {
-			errs = append(errs, err)
-		}
+	if err := statHugeTlb(m.dirPath, st); err != nil && !os.IsNotExist(err) {
+		errs = append(errs, err)
 	}
 	if len(errs) > 0 && !m.rootless {
 		return st, errors.Errorf("error while statting cgroup v2: %+v", errs)
@@ -163,53 +147,50 @@ func (m *manager) Path(_ string) string {
 	return m.dirPath
 }
 
-func (m *manager) Set(container *configs.Config) error {
-	if container == nil || container.Cgroups == nil {
-		return nil
-	}
+func (m *manager) Set(r *configs.Resources) error {
 	if err := m.getControllers(); err != nil {
 		return err
 	}
 	// pids (since kernel 4.5)
-	if err := setPids(m.dirPath, container.Cgroups); err != nil {
+	if err := setPids(m.dirPath, r); err != nil {
 		return err
 	}
 	// memory (since kernel 4.5)
-	if err := setMemory(m.dirPath, container.Cgroups); err != nil {
+	if err := setMemory(m.dirPath, r); err != nil {
 		return err
 	}
 	// io (since kernel 4.5)
-	if err := setIo(m.dirPath, container.Cgroups); err != nil {
+	if err := setIo(m.dirPath, r); err != nil {
 		return err
 	}
 	// cpu (since kernel 4.15)
-	if err := setCpu(m.dirPath, container.Cgroups); err != nil {
+	if err := setCpu(m.dirPath, r); err != nil {
 		return err
 	}
 	// devices (since kernel 4.15, pseudo-controller)
 	//
-	// When m.Rootless is true, errors from the device subsystem are ignored because it is really not expected to work.
+	// When m.rootless is true, errors from the device subsystem are ignored because it is really not expected to work.
 	// However, errors from other subsystems are not ignored.
 	// see @test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error"
-	if err := setDevices(m.dirPath, container.Cgroups); err != nil && !m.rootless {
+	if err := setDevices(m.dirPath, r); err != nil && !m.rootless {
 		return err
 	}
 	// cpuset (since kernel 5.0)
-	if err := setCpuset(m.dirPath, container.Cgroups); err != nil {
+	if err := setCpuset(m.dirPath, r); err != nil {
 		return err
 	}
 	// hugetlb (since kernel 5.6)
-	if err := setHugeTlb(m.dirPath, container.Cgroups); err != nil {
+	if err := setHugeTlb(m.dirPath, r); err != nil {
 		return err
 	}
 	// freezer (since kernel 5.2, pseudo-controller)
-	if err := setFreezer(m.dirPath, container.Cgroups.Freezer); err != nil {
+	if err := setFreezer(m.dirPath, r.Freezer); err != nil {
 		return err
 	}
-	if err := m.setUnified(container.Cgroups.Unified); err != nil {
+	if err := m.setUnified(r.Unified); err != nil {
 		return err
 	}
-	m.config = container.Cgroups
+	m.config.Resources = r
 	return nil
 }
 
@@ -263,5 +244,10 @@ func OOMKillCount(path string) (uint64, error) {
 }
 
 func (m *manager) OOMKillCount() (uint64, error) {
-	return OOMKillCount(m.dirPath)
+	c, err := OOMKillCount(m.dirPath)
+	if err != nil && m.rootless && os.IsNotExist(err) {
+		err = nil
+	}
+
+	return c, err
 }
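GetStats above switches from consulting m.controllers to simply calling each stat function and discarding ENOENT, since a missing interface file just means that controller is not enabled. A sketch of that collect-don't-abort pattern; statOne is a hypothetical helper, not runc API:

package main

import (
	"fmt"
	"os"
)

// statOne models the error handling GetStats now uses: a missing file
// (ENOENT) is silently skipped, any other failure is accumulated instead
// of aborting the whole stats collection.
func statOne(path string, errs *[]error) {
	_, err := os.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		*errs = append(*errs, err)
	}
}

func main() {
	var errs []error
	statOne("/sys/fs/cgroup/nonexistent/memory.current", &errs) // ENOENT: skipped
	statOne("/proc/1/mem", &errs)                               // usually EACCES: collected
	fmt.Printf("%d error(s) collected\n", len(errs))
}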
@@ -12,15 +12,15 @@ import (
 	"github.com/opencontainers/runc/libcontainer/configs"
 )
 
-func isHugeTlbSet(cgroup *configs.Cgroup) bool {
-	return len(cgroup.Resources.HugetlbLimit) > 0
+func isHugeTlbSet(r *configs.Resources) bool {
+	return len(r.HugetlbLimit) > 0
 }
 
-func setHugeTlb(dirPath string, cgroup *configs.Cgroup) error {
-	if !isHugeTlbSet(cgroup) {
+func setHugeTlb(dirPath string, r *configs.Resources) error {
+	if !isHugeTlbSet(r) {
 		return nil
 	}
-	for _, hugetlb := range cgroup.Resources.HugetlbLimit {
+	for _, hugetlb := range r.HugetlbLimit {
 		if err := fscommon.WriteFile(dirPath, "hugetlb."+hugetlb.Pagesize+".max", strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
 			return err
 		}
@@ -13,50 +13,50 @@ import (
 	"github.com/opencontainers/runc/libcontainer/configs"
 )
 
-func isIoSet(cgroup *configs.Cgroup) bool {
-	return cgroup.Resources.BlkioWeight != 0 ||
-		len(cgroup.Resources.BlkioThrottleReadBpsDevice) > 0 ||
-		len(cgroup.Resources.BlkioThrottleWriteBpsDevice) > 0 ||
-		len(cgroup.Resources.BlkioThrottleReadIOPSDevice) > 0 ||
-		len(cgroup.Resources.BlkioThrottleWriteIOPSDevice) > 0
+func isIoSet(r *configs.Resources) bool {
+	return r.BlkioWeight != 0 ||
+		len(r.BlkioThrottleReadBpsDevice) > 0 ||
+		len(r.BlkioThrottleWriteBpsDevice) > 0 ||
+		len(r.BlkioThrottleReadIOPSDevice) > 0 ||
+		len(r.BlkioThrottleWriteIOPSDevice) > 0
 }
 
-func setIo(dirPath string, cgroup *configs.Cgroup) error {
-	if !isIoSet(cgroup) {
+func setIo(dirPath string, r *configs.Resources) error {
+	if !isIoSet(r) {
 		return nil
 	}
 
-	if cgroup.Resources.BlkioWeight != 0 {
+	if r.BlkioWeight != 0 {
 		filename := "io.bfq.weight"
 		if err := fscommon.WriteFile(dirPath, filename,
-			strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil {
+			strconv.FormatUint(uint64(r.BlkioWeight), 10)); err != nil {
 			// if io.bfq.weight does not exist, then bfq module is not loaded.
 			// Fallback to use io.weight with a conversion scheme
 			if !os.IsNotExist(err) {
 				return err
 			}
-			v := cgroups.ConvertBlkIOToIOWeightValue(cgroup.Resources.BlkioWeight)
+			v := cgroups.ConvertBlkIOToIOWeightValue(r.BlkioWeight)
 			if err := fscommon.WriteFile(dirPath, "io.weight", strconv.FormatUint(v, 10)); err != nil {
 				return err
 			}
 		}
 	}
-	for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice {
+	for _, td := range r.BlkioThrottleReadBpsDevice {
 		if err := fscommon.WriteFile(dirPath, "io.max", td.StringName("rbps")); err != nil {
 			return err
 		}
 	}
-	for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice {
+	for _, td := range r.BlkioThrottleWriteBpsDevice {
 		if err := fscommon.WriteFile(dirPath, "io.max", td.StringName("wbps")); err != nil {
 			return err
 		}
 	}
-	for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice {
+	for _, td := range r.BlkioThrottleReadIOPSDevice {
 		if err := fscommon.WriteFile(dirPath, "io.max", td.StringName("riops")); err != nil {
 			return err
 		}
 	}
-	for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice {
+	for _, td := range r.BlkioThrottleWriteIOPSDevice {
 		if err := fscommon.WriteFile(dirPath, "io.max", td.StringName("wiops")); err != nil {
 			return err
 		}
@@ -4,13 +4,16 @@ package fs2
 
 import (
+	"bufio"
+	"math"
 	"os"
 	"strconv"
+	"strings"
 
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
 	"github.com/opencontainers/runc/libcontainer/configs"
 	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
 )
 
 // numToStr converts an int64 value to a string for writing to a
@@ -30,21 +33,20 @@ func numToStr(value int64) (ret string) {
 	return ret
 }
 
-func isMemorySet(cgroup *configs.Cgroup) bool {
-	return cgroup.Resources.MemoryReservation != 0 ||
-		cgroup.Resources.Memory != 0 || cgroup.Resources.MemorySwap != 0
+func isMemorySet(r *configs.Resources) bool {
+	return r.MemoryReservation != 0 || r.Memory != 0 || r.MemorySwap != 0
 }
 
-func setMemory(dirPath string, cgroup *configs.Cgroup) error {
-	if !isMemorySet(cgroup) {
+func setMemory(dirPath string, r *configs.Resources) error {
+	if !isMemorySet(r) {
 		return nil
 	}
-	swap, err := cgroups.ConvertMemorySwapToCgroupV2Value(cgroup.Resources.MemorySwap, cgroup.Resources.Memory)
+	swap, err := cgroups.ConvertMemorySwapToCgroupV2Value(r.MemorySwap, r.Memory)
 	if err != nil {
 		return err
 	}
 	swapStr := numToStr(swap)
-	if swapStr == "" && swap == 0 && cgroup.Resources.MemorySwap > 0 {
+	if swapStr == "" && swap == 0 && r.MemorySwap > 0 {
 		// memory and memorySwap set to the same value -- disable swap
 		swapStr = "0"
 	}
@@ -55,7 +57,7 @@ func setMemory(dirPath string, cgroup *configs.Cgroup) error {
 		}
 	}
 
-	if val := numToStr(cgroup.Resources.Memory); val != "" {
+	if val := numToStr(r.Memory); val != "" {
 		if err := fscommon.WriteFile(dirPath, "memory.max", val); err != nil {
 			return err
 		}
@@ -63,7 +65,7 @@ func setMemory(dirPath string, cgroup *configs.Cgroup) error {
 
 	// cgroup.Resources.KernelMemory is ignored
 
-	if val := numToStr(cgroup.Resources.MemoryReservation); val != "" {
+	if val := numToStr(r.MemoryReservation); val != "" {
 		if err := fscommon.WriteFile(dirPath, "memory.low", val); err != nil {
 			return err
 		}
@@ -89,9 +91,17 @@ func statMemory(dirPath string, stats *cgroups.Stats) error {
 		stats.MemoryStats.Stats[t] = v
 	}
 	stats.MemoryStats.Cache = stats.MemoryStats.Stats["file"]
+	// Unlike cgroup v1 which has memory.use_hierarchy binary knob,
+	// cgroup v2 is always hierarchical.
+	stats.MemoryStats.UseHierarchy = true
 
 	memoryUsage, err := getMemoryDataV2(dirPath, "")
 	if err != nil {
+		if errors.Is(err, unix.ENOENT) && dirPath == UnifiedMountpoint {
+			// The root cgroup does not have memory.{current,max}
+			// so emulate those using data from /proc/meminfo.
+			return statsFromMeminfo(stats)
+		}
 		return err
 	}
 	stats.MemoryStats.Usage = memoryUsage
@@ -99,9 +109,15 @@ func statMemory(dirPath string, stats *cgroups.Stats) error {
 	if err != nil {
 		return err
 	}
+	// As cgroup v1 reports SwapUsage values as mem+swap combined,
+	// while in cgroup v2 swap values do not include memory,
+	// report combined mem+swap for v1 compatibility.
+	swapUsage.Usage += memoryUsage.Usage
+	if swapUsage.Limit != math.MaxUint64 {
+		swapUsage.Limit += memoryUsage.Limit
+	}
 	stats.MemoryStats.SwapUsage = swapUsage
 
-	stats.MemoryStats.UseHierarchy = true
 	return nil
 }
 
@@ -117,7 +133,10 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) {
 
 	value, err := fscommon.GetCgroupParamUint(path, usage)
 	if err != nil {
-		if moduleName != "memory" && os.IsNotExist(err) {
+		if name != "" && os.IsNotExist(err) {
+			// Ignore EEXIST as there's no swap accounting
+			// if kernel CONFIG_MEMCG_SWAP is not set or
+			// swapaccount=0 kernel boot parameter is given.
 			return cgroups.MemoryData{}, nil
 		}
 		return cgroups.MemoryData{}, errors.Wrapf(err, "failed to parse %s", usage)
@@ -126,12 +145,69 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) {
 
 	value, err = fscommon.GetCgroupParamUint(path, limit)
 	if err != nil {
-		if moduleName != "memory" && os.IsNotExist(err) {
-			return cgroups.MemoryData{}, nil
-		}
 		return cgroups.MemoryData{}, errors.Wrapf(err, "failed to parse %s", limit)
 	}
 	memoryData.Limit = value
 
 	return memoryData, nil
 }
+
+func statsFromMeminfo(stats *cgroups.Stats) error {
+	f, err := os.Open("/proc/meminfo")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// Fields we are interested in.
+	var (
+		swap_free  uint64
+		swap_total uint64
+		main_total uint64
+		main_free  uint64
+	)
+	mem := map[string]*uint64{
+		"SwapFree":  &swap_free,
+		"SwapTotal": &swap_total,
+		"MemTotal":  &main_total,
+		"MemFree":   &main_free,
+	}
+
+	found := 0
+	sc := bufio.NewScanner(f)
+	for sc.Scan() {
+		parts := strings.SplitN(sc.Text(), ":", 3)
+		if len(parts) != 2 {
+			// Should not happen.
+			continue
+		}
+		k := parts[0]
+		p, ok := mem[k]
+		if !ok {
+			// Unknown field -- not interested.
+			continue
+		}
+		vStr := strings.TrimSpace(strings.TrimSuffix(parts[1], " kB"))
+		*p, err = strconv.ParseUint(vStr, 10, 64)
+		if err != nil {
+			return errors.Wrap(err, "parsing /proc/meminfo "+k)
+		}
+
+		found++
+		if found == len(mem) {
+			// Got everything we need -- skip the rest.
+			break
+		}
+	}
+	if sc.Err() != nil {
+		return sc.Err()
+	}
+
+	stats.MemoryStats.SwapUsage.Usage = (swap_total - swap_free) * 1024
+	stats.MemoryStats.SwapUsage.Limit = math.MaxUint64
+
+	stats.MemoryStats.Usage.Usage = (main_total - main_free) * 1024
+	stats.MemoryStats.Usage.Limit = math.MaxUint64
+
+	return nil
+}
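The swap hunks above encode a unit convention: cgroup v1 memsw counters include memory, while cgroup v2 swap counters do not, so v1 compatibility means adding memory back in, except when the limit is "max" (unlimited). A worked example with made-up numbers:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Hypothetical readings: 300 MiB of 512 MiB memory, and (v2-style,
	// swap only) 100 MiB of 1 GiB swap.
	memUsage, memLimit := uint64(300<<20), uint64(512<<20)
	swapUsage, swapLimit := uint64(100<<20), uint64(1<<30)

	// Report mem+swap, v1 style: 400 MiB used.
	swapUsage += memUsage
	if swapLimit != math.MaxUint64 {
		swapLimit += memLimit // an unlimited ("max") limit stays unlimited
	}
	fmt.Printf("v1-compatible memsw: usage=%d MiB limit=%d MiB\n",
		swapUsage>>20, swapLimit>>20)
}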
@@ -3,6 +3,7 @@
 package fs2
 
 import (
+	"os"
 	"path/filepath"
 	"strings"
 
@@ -13,15 +14,15 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-func isPidsSet(cgroup *configs.Cgroup) bool {
-	return cgroup.Resources.PidsLimit != 0
+func isPidsSet(r *configs.Resources) bool {
+	return r.PidsLimit != 0
 }
 
-func setPids(dirPath string, cgroup *configs.Cgroup) error {
-	if !isPidsSet(cgroup) {
+func setPids(dirPath string, r *configs.Resources) error {
+	if !isPidsSet(r) {
 		return nil
 	}
-	if val := numToStr(cgroup.Resources.PidsLimit); val != "" {
+	if val := numToStr(r.PidsLimit); val != "" {
 		if err := fscommon.WriteFile(dirPath, "pids.max", val); err != nil {
 			return err
 		}
@@ -30,7 +31,7 @@ func setPids(dirPath string, cgroup *configs.Cgroup) error {
 	return nil
 }
 
-func statPidsWithoutController(dirPath string, stats *cgroups.Stats) error {
+func statPidsFromCgroupProcs(dirPath string, stats *cgroups.Stats) error {
 	// if the controller is not enabled, let's read PIDS from cgroups.procs
 	// (or threads if cgroup.threads is enabled)
 	contents, err := fscommon.ReadFile(dirPath, "cgroup.procs")
@@ -40,13 +41,8 @@ func statPidsFromCgroupProcs(dirPath string, stats *cgroups.Stats) error {
 	if err != nil {
 		return err
 	}
-	pids := make(map[string]string)
-	for _, i := range strings.Split(contents, "\n") {
-		if i != "" {
-			pids[i] = i
-		}
-	}
-	stats.PidsStats.Current = uint64(len(pids))
+	pids := strings.Count(contents, "\n")
+	stats.PidsStats.Current = uint64(pids)
 	stats.PidsStats.Limit = 0
 	return nil
 }
@@ -54,6 +50,9 @@ func statPidsFromCgroupProcs(dirPath string, stats *cgroups.Stats) error {
 func statPids(dirPath string, stats *cgroups.Stats) error {
 	current, err := fscommon.GetCgroupParamUint(dirPath, "pids.current")
 	if err != nil {
+		if os.IsNotExist(err) {
+			return statPidsFromCgroupProcs(dirPath, stats)
+		}
 		return errors.Wrap(err, "failed to parse pids.current")
 	}
 
@@ -5,7 +5,6 @@ import (
 	"strings"
 	"sync"
 
-	securejoin "github.com/cyphar/filepath-securejoin"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
@@ -17,7 +16,7 @@ const (
 )
 
 var (
-	// Set to true by fs unit tests
+	// TestMode is set to true by unit tests that need "fake" cgroupfs.
 	TestMode bool
 
 	cgroupFd int = -1
@@ -71,12 +70,12 @@ func OpenFile(dir, file string, flags int) (*os.File, error) {
 		flags |= os.O_TRUNC | os.O_CREATE
 		mode = 0o600
 	}
+	if prepareOpenat2() != nil {
+		return openFallback(dir, file, flags, mode)
+	}
 	reldir := strings.TrimPrefix(dir, cgroupfsPrefix)
 	if len(reldir) == len(dir) { // non-standard path, old system?
-		return openWithSecureJoin(dir, file, flags, mode)
-	}
-	if prepareOpenat2() != nil {
-		return openWithSecureJoin(dir, file, flags, mode)
+		return openFallback(dir, file, flags, mode)
 	}
 
 	relname := reldir + "/" + file
@@ -93,11 +92,29 @@ func OpenFile(dir, file string, flags int) (*os.File, error) {
 	return os.NewFile(uintptr(fd), cgroupfsPrefix+relname), nil
 }
 
-func openWithSecureJoin(dir, file string, flags int, mode os.FileMode) (*os.File, error) {
-	path, err := securejoin.SecureJoin(dir, file)
+var errNotCgroupfs = errors.New("not a cgroup file")
+
+// openFallback is used when openat2(2) is not available. It checks the opened
+// file is on cgroupfs, returning an error otherwise.
+func openFallback(dir, file string, flags int, mode os.FileMode) (*os.File, error) {
+	path := dir + "/" + file
+	fd, err := os.OpenFile(path, flags, mode)
 	if err != nil {
 		return nil, err
 	}
+	if TestMode {
+		return fd, nil
+	}
+	// Check this is a cgroupfs file.
+	var st unix.Statfs_t
+	if err := unix.Fstatfs(int(fd.Fd()), &st); err != nil {
+		_ = fd.Close()
+		return nil, &os.PathError{Op: "statfs", Path: path, Err: err}
+	}
+	if st.Type != unix.CGROUP_SUPER_MAGIC && st.Type != unix.CGROUP2_SUPER_MAGIC {
+		_ = fd.Close()
+		return nil, &os.PathError{Op: "open", Path: path, Err: errNotCgroupfs}
+	}
 
-	return os.OpenFile(path, flags, mode)
+	return fd, nil
 }
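openFallback's safety net is the post-open fstatfs(2) check on the filesystem magic. A sketch of that check in isolation; isOnCgroupfs is a hypothetical helper, and note that on a cgroup v1 host /sys/fs/cgroup itself is a tmpfs, so the probe below reports false there:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// isOnCgroupfs reports whether the open file lives on cgroupfs (v1 or v2)
// by inspecting the filesystem magic number, as openFallback does above.
func isOnCgroupfs(f *os.File) (bool, error) {
	var st unix.Statfs_t
	if err := unix.Fstatfs(int(f.Fd()), &st); err != nil {
		return false, err
	}
	return st.Type == unix.CGROUP_SUPER_MAGIC || st.Type == unix.CGROUP2_SUPER_MAGIC, nil
}

func main() {
	f, err := os.Open("/sys/fs/cgroup")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	ok, err := isOnCgroupfs(f)
	fmt.Println("on cgroupfs:", ok, err)
}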
@@ -2,6 +2,7 @@ package systemd
 
 import (
 	"bufio"
+	"context"
 	"fmt"
 	"math"
 	"os"
@@ -28,10 +29,6 @@
 )
 
 var (
-	connOnce sync.Once
-	connDbus *systemdDbus.Conn
-	connErr  error
-
 	versionOnce sync.Once
 	version     int
 
@@ -291,19 +288,6 @@ func generateDeviceProperties(rules []*devices.Rule) ([]systemdDbus.Property, er
 	return properties, nil
 }
 
-// getDbusConnection lazy initializes systemd dbus connection
-// and returns it
-func getDbusConnection(rootless bool) (*systemdDbus.Conn, error) {
-	connOnce.Do(func() {
-		if rootless {
-			connDbus, connErr = NewUserSystemdDbus()
-		} else {
-			connDbus, connErr = systemdDbus.New()
-		}
-	})
-	return connDbus, connErr
-}
-
 func newProp(name string, units interface{}) systemdDbus.Property {
 	return systemdDbus.Property{
 		Name: name,
@@ -319,32 +303,42 @@ func getUnitName(c *configs.Cgroup) string {
 	return c.Name
 }
 
-// isUnitExists returns true if the error is that a systemd unit already exists.
-func isUnitExists(err error) bool {
+// isDbusError returns true if the error is a specific dbus error.
+func isDbusError(err error, name string) bool {
 	if err != nil {
-		if dbusError, ok := err.(dbus.Error); ok {
-			return strings.Contains(dbusError.Name, "org.freedesktop.systemd1.UnitExists")
+		var derr *dbus.Error
+		if errors.As(err, &derr) {
+			return strings.Contains(derr.Name, name)
 		}
 	}
 	return false
 }
 
-func startUnit(dbusConnection *systemdDbus.Conn, unitName string, properties []systemdDbus.Property) error {
+// isUnitExists returns true if the error is that a systemd unit already exists.
+func isUnitExists(err error) bool {
+	return isDbusError(err, "org.freedesktop.systemd1.UnitExists")
+}
+
+func startUnit(cm *dbusConnManager, unitName string, properties []systemdDbus.Property) error {
 	statusChan := make(chan string, 1)
-	if _, err := dbusConnection.StartTransientUnit(unitName, "replace", properties, statusChan); err == nil {
+	err := cm.retryOnDisconnect(func(c *systemdDbus.Conn) error {
+		_, err := c.StartTransientUnitContext(context.TODO(), unitName, "replace", properties, statusChan)
+		return err
+	})
+	if err == nil {
 		timeout := time.NewTimer(30 * time.Second)
 		defer timeout.Stop()
 
 		select {
 		case s := <-statusChan:
 			close(statusChan)
-			// Please refer to https://godoc.org/github.com/coreos/go-systemd/dbus#Conn.StartUnit
+			// Please refer to https://pkg.go.dev/github.com/coreos/go-systemd/v22/dbus#Conn.StartUnit
 			if s != "done" {
-				dbusConnection.ResetFailedUnit(unitName)
+				resetFailedUnit(cm, unitName)
 				return errors.Errorf("error creating systemd unit `%s`: got `%s`", unitName, s)
 			}
 		case <-timeout.C:
-			dbusConnection.ResetFailedUnit(unitName)
+			resetFailedUnit(cm, unitName)
 			return errors.New("Timeout waiting for systemd to create " + unitName)
 		}
 	} else if !isUnitExists(err) {
@@ -354,13 +348,17 @@ func startUnit(cm *dbusConnManager, unitName string, properties []systemdDbus.Property) error {
 	return nil
 }
 
-func stopUnit(dbusConnection *systemdDbus.Conn, unitName string) error {
+func stopUnit(cm *dbusConnManager, unitName string) error {
 	statusChan := make(chan string, 1)
-	if _, err := dbusConnection.StopUnit(unitName, "replace", statusChan); err == nil {
+	err := cm.retryOnDisconnect(func(c *systemdDbus.Conn) error {
+		_, err := c.StopUnitContext(context.TODO(), unitName, "replace", statusChan)
+		return err
+	})
+	if err == nil {
 		select {
 		case s := <-statusChan:
 			close(statusChan)
-			// Please refer to https://godoc.org/github.com/coreos/go-systemd/dbus#Conn.StartUnit
+			// Please refer to https://godoc.org/github.com/coreos/go-systemd/v22/dbus#Conn.StartUnit
 			if s != "done" {
 				logrus.Warnf("error removing unit `%s`: got `%s`. Continuing...", unitName, s)
 			}
@@ -371,10 +369,38 @@ func stopUnit(cm *dbusConnManager, unitName string) error {
 	return nil
 }
 
-func systemdVersion(conn *systemdDbus.Conn) int {
+func resetFailedUnit(cm *dbusConnManager, name string) {
+	err := cm.retryOnDisconnect(func(c *systemdDbus.Conn) error {
+		return c.ResetFailedUnitContext(context.TODO(), name)
+	})
+	if err != nil {
+		logrus.Warnf("unable to reset failed unit: %v", err)
+	}
+}
+
+func setUnitProperties(cm *dbusConnManager, name string, properties ...systemdDbus.Property) error {
+	return cm.retryOnDisconnect(func(c *systemdDbus.Conn) error {
+		return c.SetUnitPropertiesContext(context.TODO(), name, true, properties...)
+	})
+}
+
+func getManagerProperty(cm *dbusConnManager, name string) (string, error) {
+	str := ""
+	err := cm.retryOnDisconnect(func(c *systemdDbus.Conn) error {
+		var err error
+		str, err = c.GetManagerProperty(name)
+		return err
+	})
+	if err != nil {
+		return "", err
+	}
+	return strconv.Unquote(str)
+}
+
+func systemdVersion(cm *dbusConnManager) int {
 	versionOnce.Do(func() {
 		version = -1
-		verStr, err := conn.GetManagerProperty("Version")
+		verStr, err := getManagerProperty(cm, "Version")
 		if err == nil {
 			version, err = systemdVersionAtoi(verStr)
 		}
@@ -389,11 +415,11 @@ func systemdVersion(cm *dbusConnManager) int {
 
 func systemdVersionAtoi(verStr string) (int, error) {
 	// verStr should be of the form:
-	// "v245.4-1.fc32", "245", "v245-1.fc32", "245-1.fc32"
-	// all the input strings include quotes, and the output int should be 245
-	// thus, we unconditionally remove the `"v`
-	// and then match on the first integer we can grab
-	re := regexp.MustCompile(`"?v?([0-9]+)`)
+	// "v245.4-1.fc32", "245", "v245-1.fc32", "245-1.fc32" (without quotes).
+	// The result for all of the above should be 245.
+	// Thus, we unconditionally remove the "v" prefix
+	// and then match on the first integer we can grab.
+	re := regexp.MustCompile(`v?([0-9]+)`)
 	matches := re.FindStringSubmatch(verStr)
 	if len(matches) < 2 {
 		return 0, errors.Errorf("can't parse version %s: incorrect number of matches %v", verStr, matches)
@@ -402,10 +428,10 @@ func systemdVersionAtoi(verStr string) (int, error) {
 	return ver, errors.Wrapf(err, "can't parse version %s", verStr)
 }
 
-func addCpuQuota(conn *systemdDbus.Conn, properties *[]systemdDbus.Property, quota int64, period uint64) {
+func addCpuQuota(cm *dbusConnManager, properties *[]systemdDbus.Property, quota int64, period uint64) {
 	if period != 0 {
 		// systemd only supports CPUQuotaPeriodUSec since v242
-		sdVer := systemdVersion(conn)
+		sdVer := systemdVersion(cm)
 		if sdVer >= 242 {
 			*properties = append(*properties,
 				newProp("CPUQuotaPeriodUSec", period))
@@ -436,13 +462,13 @@ func addCpuQuota(cm *dbusConnManager, properties *[]systemdDbus.Property, quota int64, period uint64) {
 	}
 }
 
-func addCpuset(conn *systemdDbus.Conn, props *[]systemdDbus.Property, cpus, mems string) error {
+func addCpuset(cm *dbusConnManager, props *[]systemdDbus.Property, cpus, mems string) error {
 	if cpus == "" && mems == "" {
 		return nil
 	}
 
 	// systemd only supports AllowedCPUs/AllowedMemoryNodes since v244
-	sdVer := systemdVersion(conn)
+	sdVer := systemdVersion(cm)
 	if sdVer < 244 {
 		logrus.Debugf("systemd v%d is too old to support AllowedCPUs/AllowedMemoryNodes"+
 			" (settings will still be applied to cgroupfs)", sdVer)
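The regexp change in systemdVersionAtoi works because getManagerProperty() now runs strconv.Unquote on the raw dbus value, so the quote character never reaches the parser. A quick check against the version strings named in the comment:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	// Same pattern as the new systemdVersionAtoi: strip an optional "v"
	// prefix, then grab the first run of digits.
	re := regexp.MustCompile(`v?([0-9]+)`)
	for _, s := range []string{"v245.4-1.fc32", "245", "v245-1.fc32", "245-1.fc32"} {
		m := re.FindStringSubmatch(s)
		n, _ := strconv.Atoi(m[1])
		fmt.Printf("%-14s -> %d\n", s, n) // all four parse to 245
	}
}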
vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/dbus.go (generated, vendored, new file: 96 lines)
@@ -0,0 +1,96 @@
+// +build linux
+
+package systemd
+
+import (
+	"context"
+	"sync"
+
+	systemdDbus "github.com/coreos/go-systemd/v22/dbus"
+	dbus "github.com/godbus/dbus/v5"
+)
+
+var (
+	dbusC        *systemdDbus.Conn
+	dbusMu       sync.RWMutex
+	dbusInited   bool
+	dbusRootless bool
+)
+
+type dbusConnManager struct {
+}
+
+// newDbusConnManager initializes systemd dbus connection manager.
+func newDbusConnManager(rootless bool) *dbusConnManager {
+	if dbusInited && rootless != dbusRootless {
+		panic("can't have both root and rootless dbus")
+	}
+	dbusRootless = rootless
+	return &dbusConnManager{}
+}
+
+// getConnection lazily initializes and returns systemd dbus connection.
+func (d *dbusConnManager) getConnection() (*systemdDbus.Conn, error) {
+	// In the case where dbusC != nil
+	// Use the read lock the first time to ensure
+	// that Conn can be acquired at the same time.
+	dbusMu.RLock()
+	if conn := dbusC; conn != nil {
+		dbusMu.RUnlock()
+		return conn, nil
+	}
+	dbusMu.RUnlock()
+
+	// In the case where dbusC == nil
+	// Use write lock to ensure that only one
+	// will be created
+	dbusMu.Lock()
+	defer dbusMu.Unlock()
+	if conn := dbusC; conn != nil {
+		return conn, nil
+	}
+
+	conn, err := d.newConnection()
+	if err != nil {
+		return nil, err
+	}
+	dbusC = conn
+	return conn, nil
+}
+
+func (d *dbusConnManager) newConnection() (*systemdDbus.Conn, error) {
+	if dbusRootless {
+		return newUserSystemdDbus()
+	}
+	return systemdDbus.NewWithContext(context.TODO())
+}
+
+// resetConnection resets the connection to its initial state
+// (so it can be reconnected if necessary).
+func (d *dbusConnManager) resetConnection(conn *systemdDbus.Conn) {
+	dbusMu.Lock()
+	defer dbusMu.Unlock()
+	if dbusC != nil && dbusC == conn {
+		dbusC.Close()
+		dbusC = nil
+	}
+}
+
+var errDbusConnClosed = dbus.ErrClosed.Error()
+
+// retryOnDisconnect calls op, and if the error it returns is about closed dbus
+// connection, the connection is re-established and the op is retried. This helps
+// with the situation when dbus is restarted and we have a stale connection.
+func (d *dbusConnManager) retryOnDisconnect(op func(*systemdDbus.Conn) error) error {
+	for {
+		conn, err := d.getConnection()
+		if err != nil {
+			return err
+		}
+		err = op(conn)
+		if !isDbusError(err, errDbusConnClosed) {
+			return err
+		}
+		d.resetConnection(conn)
+	}
+}
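The new dbusConnManager centralizes one idea: wrap every dbus call in a closure that receives the current connection, and if the call fails because the connection was closed (for example, dbus was restarted), rebuild the connection and retry. A self-contained model of that loop; the conn, manager, and errClosed names are toy stand-ins, not runc API:

package main

import (
	"errors"
	"fmt"
)

type conn struct{ alive bool }

var errClosed = errors.New("connection closed by user")

type manager struct{ c *conn }

// getConnection lazily (re)establishes the connection, like the
// dbusConnManager.getConnection above (minus the locking).
func (m *manager) getConnection() *conn {
	if m.c == nil || !m.c.alive {
		m.c = &conn{alive: true} // "reconnect"
	}
	return m.c
}

// retryOnDisconnect re-runs op with a fresh connection whenever op fails
// with the closed-connection error; any other result is returned as-is.
func (m *manager) retryOnDisconnect(op func(*conn) error) error {
	for {
		c := m.getConnection()
		if err := op(c); !errors.Is(err, errClosed) {
			return err // success, or a non-retryable failure
		}
		m.c = nil // drop the stale connection and loop
	}
}

func main() {
	m := &manager{}
	calls := 0
	err := m.retryOnDisconnect(func(c *conn) error {
		calls++
		if calls == 1 {
			c.alive = false
			return errClosed // simulate a dbus restart on the first try
		}
		return nil
	})
	fmt.Println("calls:", calls, "err:", err) // calls: 2 err: <nil>
}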
@@ -17,8 +17,8 @@ import (
 	"github.com/pkg/errors"
 )
 
-// NewUserSystemdDbus creates a connection for systemd user-instance.
-func NewUserSystemdDbus() (*systemdDbus.Conn, error) {
+// newUserSystemdDbus creates a connection for systemd user-instance.
+func newUserSystemdDbus() (*systemdDbus.Conn, error) {
 	addr, err := DetectUserDbusSessionBusAddress()
 	if err != nil {
 		return nil, err
@@ -20,12 +20,14 @@ type legacyManager struct {
 	mu      sync.Mutex
 	cgroups *configs.Cgroup
 	paths   map[string]string
+	dbus    *dbusConnManager
 }
 
 func NewLegacyManager(cg *configs.Cgroup, paths map[string]string) cgroups.Manager {
 	return &legacyManager{
 		cgroups: cg,
 		paths:   paths,
+		dbus:    newDbusConnManager(false),
 	}
 }
 
@@ -34,8 +36,8 @@ type subsystem interface {
 	Name() string
 	// Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
 	GetStats(path string, stats *cgroups.Stats) error
-	// Set the cgroup represented by cgroup.
-	Set(path string, cgroup *configs.Cgroup) error
+	// Set sets cgroup resource limits.
+	Set(path string, r *configs.Resources) error
 }
 
 var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
@@ -56,9 +58,8 @@ var legacySubsystems = []subsystem{
 	&fs.NameGroup{GroupName: "name=systemd"},
 }
 
-func genV1ResourcesProperties(c *configs.Cgroup, conn *systemdDbus.Conn) ([]systemdDbus.Property, error) {
+func genV1ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) {
 	var properties []systemdDbus.Property
-	r := c.Resources
 
 	deviceProperties, err := generateDeviceProperties(r.Devices)
 	if err != nil {
@@ -76,7 +77,7 @@ func genV1ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) {
 			newProp("CPUShares", r.CpuShares))
 	}
 
-	addCpuQuota(conn, &properties, r.CpuQuota, r.CpuPeriod)
+	addCpuQuota(cm, &properties, r.CpuQuota, r.CpuPeriod)
 
 	if r.BlkioWeight != 0 {
 		properties = append(properties,
@@ -88,7 +89,7 @@ func genV1ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) {
 			newProp("TasksMax", uint64(r.PidsLimit)))
 	}
 
-	err = addCpuset(conn, &properties, r.CpusetCpus, r.CpusetMems)
+	err = addCpuset(cm, &properties, r.CpusetCpus, r.CpusetMems)
 	if err != nil {
 		return nil, err
 	}
@@ -164,13 +165,9 @@ func (m *legacyManager) Apply(pid int) error {
 		properties = append(properties,
 			newProp("DefaultDependencies", false))
 
-	dbusConnection, err := getDbusConnection(false)
-	if err != nil {
-		return err
-	}
 	properties = append(properties, c.SystemdProps...)
 
-	if err := startUnit(dbusConnection, unitName, properties); err != nil {
+	if err := startUnit(m.dbus, unitName, properties); err != nil {
 		return err
 	}
 
@@ -208,13 +205,8 @@ func (m *legacyManager) Destroy() error {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 
-	dbusConnection, err := getDbusConnection(false)
-	if err != nil {
-		return err
-	}
-	unitName := getUnitName(m.cgroups)
+	stopErr := stopUnit(m.dbus, getUnitName(m.cgroups))
 
-	stopErr := stopUnit(dbusConnection, unitName)
 	// Both on success and on error, cleanup all the cgroups we are aware of.
 	// Some of them were created directly by Apply() and are not managed by systemd.
 	if err := cgroups.RemovePaths(m.paths); err != nil {
@@ -239,7 +231,7 @@ func (m *legacyManager) joinCgroups(pid int) error {
 		case "cpuset":
 			if path, ok := m.paths[name]; ok {
 				s := &fs.CpusetGroup{}
-				if err := s.ApplyDir(path, m.cgroups, pid); err != nil {
+				if err := s.ApplyDir(path, m.cgroups.Resources, pid); err != nil {
 					return err
 				}
 			}
@@ -292,7 +284,7 @@ func (m *legacyManager) Freeze(state configs.FreezerState) error {
 	prevState := m.cgroups.Resources.Freezer
 	m.cgroups.Resources.Freezer = state
 	freezer := &fs.FreezerGroup{}
-	if err := freezer.Set(path, m.cgroups); err != nil {
+	if err := freezer.Set(path, m.cgroups.Resources); err != nil {
 		m.cgroups.Resources.Freezer = prevState
 		return err
 	}
@@ -332,20 +324,16 @@ func (m *legacyManager) GetStats() (*cgroups.Stats, error) {
 	return stats, nil
 }
 
-func (m *legacyManager) Set(container *configs.Config) error {
+func (m *legacyManager) Set(r *configs.Resources) error {
 	// If Paths are set, then we are just joining cgroups paths
 	// and there is no need to set any values.
 	if m.cgroups.Paths != nil {
 		return nil
 	}
-	if container.Cgroups.Resources.Unified != nil {
+	if r.Unified != nil {
 		return cgroups.ErrV1NoUnified
 	}
-	dbusConnection, err := getDbusConnection(false)
-	if err != nil {
-		return err
-	}
-	properties, err := genV1ResourcesProperties(container.Cgroups, dbusConnection)
+	properties, err := genV1ResourcesProperties(r, m.dbus)
 	if err != nil {
 		return err
 	}
@@ -373,7 +361,7 @@ func (m *legacyManager) Set(r *configs.Resources) error {
 		}
 	}
 
-	if err := dbusConnection.SetUnitProperties(getUnitName(container.Cgroups), true, properties...); err != nil {
+	if err := setUnitProperties(m.dbus, getUnitName(m.cgroups), properties...); err != nil {
 		_ = m.Freeze(targetFreezerState)
 		return err
 	}
@@ -388,7 +376,7 @@ func (m *legacyManager) Set(r *configs.Resources) error {
 		if !ok {
 			continue
 		}
-		if err := sys.Set(path, container.Cgroups); err != nil {
+		if err := sys.Set(path, r); err != nil {
 			return err
 		}
 	}
@@ -26,6 +26,7 @@ type unifiedManager struct {
 	// path is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope"
 	path     string
 	rootless bool
+	dbus     *dbusConnManager
 }
 
 func NewUnifiedManager(config *configs.Cgroup, path string, rootless bool) cgroups.Manager {
@@ -33,6 +34,7 @@ func NewUnifiedManager(config *configs.Cgroup, path string, rootless bool) cgroups.Manager {
 		cgroups:  config,
 		path:     path,
 		rootless: rootless,
+		dbus:     newDbusConnManager(rootless),
 	}
 }
 
@@ -45,7 +47,7 @@ func NewUnifiedManager(config *configs.Cgroup, path string, rootless bool) cgroups.Manager {
 // For the list of keys, see https://www.kernel.org/doc/Documentation/cgroup-v2.txt
 //
 // For the list of systemd unit properties, see systemd.resource-control(5).
-func unifiedResToSystemdProps(conn *systemdDbus.Conn, res map[string]string) (props []systemdDbus.Property, _ error) {
+func unifiedResToSystemdProps(cm *dbusConnManager, res map[string]string) (props []systemdDbus.Property, _ error) {
 	var err error
 
 	for k, v := range res {
@@ -83,7 +85,7 @@ func unifiedResToSystemdProps(cm *dbusConnManager, res map[string]string) (props []systemdDbus.Property, _ error) {
 					return nil, fmt.Errorf("unified resource %q quota value conversion error: %w", k, err)
 				}
 			}
-			addCpuQuota(conn, &props, quota, period)
+			addCpuQuota(cm, &props, quota, period)
 
 		case "cpu.weight":
 			num, err := strconv.ParseUint(v, 10, 64)
@@ -103,7 +105,7 @@ func unifiedResToSystemdProps(cm *dbusConnManager, res map[string]string) (props []systemdDbus.Property, _ error) {
 				"cpuset.mems": "AllowedMemoryNodes",
 			}
 			// systemd only supports these properties since v244
-			sdVer := systemdVersion(conn)
+			sdVer := systemdVersion(cm)
 			if sdVer >= 244 {
 				props = append(props,
 					newProp(m[k], bits))
@@ -162,9 +164,8 @@ func unifiedResToSystemdProps(cm *dbusConnManager, res map[string]string) (props []systemdDbus.Property, _ error) {
 	return props, nil
 }
 
-func genV2ResourcesProperties(c *configs.Cgroup, conn *systemdDbus.Conn) ([]systemdDbus.Property, error) {
+func genV2ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) {
 	var properties []systemdDbus.Property
-	r := c.Resources
 
 	// NOTE: This is of questionable correctness because we insert our own
 	//       devices eBPF program later. Two programs with identical rules
@@ -200,14 +201,14 @@ func genV2ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) {
 			newProp("CPUWeight", r.CpuWeight))
 	}
 
-	addCpuQuota(conn, &properties, r.CpuQuota, r.CpuPeriod)
+	addCpuQuota(cm, &properties, r.CpuQuota, r.CpuPeriod)
 
 	if r.PidsLimit > 0 || r.PidsLimit == -1 {
 		properties = append(properties,
 			newProp("TasksMax", uint64(r.PidsLimit)))
 	}
 
-	err = addCpuset(conn, &properties, r.CpusetCpus, r.CpusetMems)
+	err = addCpuset(cm, &properties, r.CpusetCpus, r.CpusetMems)
 	if err != nil {
 		return nil, err
 	}
@@ -216,7 +217,7 @@ func genV2ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) {
 
 	// convert Resources.Unified map to systemd properties
 	if r.Unified != nil {
-		unifiedProps, err := unifiedResToSystemdProps(conn, r.Unified)
+		unifiedProps, err := unifiedResToSystemdProps(cm, r.Unified)
 		if err != nil {
 			return nil, err
 		}
@@ -279,17 +280,13 @@ func (m *unifiedManager) Apply(pid int) error {
 		properties = append(properties,
 			newProp("DefaultDependencies", false))
 
-	dbusConnection, err := getDbusConnection(m.rootless)
-	if err != nil {
-		return err
-	}
 	properties = append(properties, c.SystemdProps...)
 
-	if err := startUnit(dbusConnection, unitName, properties); err != nil {
+	if err := startUnit(m.dbus, unitName, properties); err != nil {
 		return errors.Wrapf(err, "error while starting unit %q with properties %+v", unitName, properties)
 	}
 
-	if err = m.initPath(); err != nil {
+	if err := m.initPath(); err != nil {
 		return err
 	}
 	if err := fs2.CreateCgroupPath(m.path, m.cgroups); err != nil {
@@ -305,17 +302,13 @@ func (m *unifiedManager) Destroy() error {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 
-	dbusConnection, err := getDbusConnection(m.rootless)
-	if err != nil {
-		return err
-	}
 	unitName := getUnitName(m.cgroups)
-	if err := stopUnit(dbusConnection, unitName); err != nil {
+	if err := stopUnit(m.dbus, unitName); err != nil {
 		return err
 	}
 
 	// XXX this is probably not needed, systemd should handle it
-	err = os.Remove(m.path)
+	err := os.Remove(m.path)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -324,6 +317,7 @@ func (m *unifiedManager) Destroy() error {
 }
 
 func (m *unifiedManager) Path(_ string) string {
+	_ = m.initPath()
 	return m.path
 }
 
@@ -344,16 +338,8 @@ func (m *unifiedManager) getSliceFull() (string, error) {
 	}
 
 	if m.rootless {
-		dbusConnection, err := getDbusConnection(m.rootless)
-		if err != nil {
-			return "", err
-		}
-		// managerCGQuoted is typically "/user.slice/user-${uid}.slice/user@${uid}.service" including the quote symbols
-		managerCGQuoted, err := dbusConnection.GetManagerProperty("ControlGroup")
-		if err != nil {
-			return "", err
-		}
-		managerCG, err := strconv.Unquote(managerCGQuoted)
+		// managerCG is typically "/user.slice/user-${uid}.slice/user@${uid}.service".
+		managerCG, err := getManagerProperty(m.dbus, "ControlGroup")
 		if err != nil {
 			return "", err
 		}
@@ -426,12 +412,8 @@ func (m *unifiedManager) GetStats() (*cgroups.Stats, error) {
 	return fsMgr.GetStats()
 }
 
-func (m *unifiedManager) Set(container *configs.Config) error {
-	dbusConnection, err := getDbusConnection(m.rootless)
-	if err != nil {
-		return err
-	}
-	properties, err := genV2ResourcesProperties(m.cgroups, dbusConnection)
+func (m *unifiedManager) Set(r *configs.Resources) error {
+	properties, err := genV2ResourcesProperties(r, m.dbus)
 	if err != nil {
 		return err
 	}
@@ -459,7 +441,7 @@ func (m *unifiedManager) Set(r *configs.Resources) error {
 		}
 	}
 
-	if err := dbusConnection.SetUnitProperties(getUnitName(m.cgroups), true, properties...); err != nil {
+	if err := setUnitProperties(m.dbus, getUnitName(m.cgroups), properties...); err != nil {
 		_ = m.Freeze(targetFreezerState)
 		return errors.Wrap(err, "error while setting unit properties")
 	}
@@ -472,7 +454,7 @@ func (m *unifiedManager) Set(r *configs.Resources) error {
 	if err != nil {
 		return err
 	}
-	return fsMgr.Set(container)
+	return fsMgr.Set(r)
 }
 
 func (m *unifiedManager) GetPaths() map[string]string {
@@ -498,5 +480,9 @@ func (m *unifiedManager) Exists() bool {
 }
 
 func (m *unifiedManager) OOMKillCount() (uint64, error) {
-	return fs2.OOMKillCount(m.path)
+	fsMgr, err := m.fsManager()
+	if err != nil {
+		return 0, err
+	}
+	return fsMgr.OOMKillCount()
 }
@@ -31,9 +31,10 @@ type IDMap struct {
 // for syscalls. Additional architectures can be added by specifying them in
 // Architectures.
 type Seccomp struct {
-	DefaultAction Action     `json:"default_action"`
-	Architectures []string   `json:"architectures"`
-	Syscalls      []*Syscall `json:"syscalls"`
+	DefaultAction   Action     `json:"default_action"`
+	Architectures   []string   `json:"architectures"`
+	Syscalls        []*Syscall `json:"syscalls"`
+	DefaultErrnoRet *uint      `json:"default_errno_ret"`
 }
 
 // Action is taken upon rule match in Seccomp
vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go (generated, vendored, 11 lines changed)
@@ -39,6 +39,7 @@ func (v *ConfigValidator) Validate(config *configs.Config) error {
 		v.sysctl,
 		v.intelrdt,
 		v.rootlessEUID,
+		v.mounts,
 	}
 	for _, c := range checks {
 		if err := c(config); err != nil {
@@ -246,6 +247,16 @@ func (v *ConfigValidator) cgroups(config *configs.Config) error {
 	return nil
 }
 
+func (v *ConfigValidator) mounts(config *configs.Config) error {
+	for _, m := range config.Mounts {
+		if !filepath.IsAbs(m.Destination) {
+			return fmt.Errorf("invalid mount %+v: mount destination not absolute", m)
+		}
+	}
+
+	return nil
+}
+
 func isHostNetNS(path string) (bool, error) {
 	const currentProcessNetns = "/proc/self/ns/net"
 
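The new mounts validator reduces to filepath.IsAbs on each destination; relative destinations are rejected before the container is ever set up. For illustration:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Only the first and last of these would pass the new check.
	for _, dest := range []string{"/proc", "proc", "../etc", "/tmp/x"} {
		fmt.Printf("%-8s absolute=%v\n", dest, filepath.IsAbs(dest))
	}
}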
@@ -225,9 +225,9 @@ func (c *linuxContainer) Set(config configs.Config) error {
 	if status == Stopped {
 		return newGenericError(errors.New("container not running"), ContainerNotRunning)
 	}
-	if err := c.cgroupManager.Set(&config); err != nil {
+	if err := c.cgroupManager.Set(config.Cgroups.Resources); err != nil {
 		// Set configs back
-		if err2 := c.cgroupManager.Set(c.config); err2 != nil {
+		if err2 := c.cgroupManager.Set(c.config.Cgroups.Resources); err2 != nil {
 			logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
 		}
 		return err
@@ -235,7 +235,7 @@ func (c *linuxContainer) Set(config configs.Config) error {
 	if c.intelRdtManager != nil {
 		if err := c.intelRdtManager.Set(&config); err != nil {
 			// Set configs back
-			if err2 := c.cgroupManager.Set(c.config); err2 != nil {
+			if err2 := c.cgroupManager.Set(c.config.Cgroups.Resources); err2 != nil {
 				logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
 			}
 			if err2 := c.intelRdtManager.Set(c.config); err2 != nil {
@@ -610,6 +610,9 @@ func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
 		AppArmorProfile:  c.config.AppArmorProfile,
 		ProcessLabel:     c.config.ProcessLabel,
 		Rlimits:          c.config.Rlimits,
+		CreateConsole:    process.ConsoleSocket != nil,
+		ConsoleWidth:     process.ConsoleWidth,
+		ConsoleHeight:    process.ConsoleHeight,
 	}
 	if process.NoNewPrivileges != nil {
 		cfg.NoNewPrivileges = *process.NoNewPrivileges
@@ -623,9 +626,10 @@ func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
 	if len(process.Rlimits) > 0 {
 		cfg.Rlimits = process.Rlimits
 	}
-	cfg.CreateConsole = process.ConsoleSocket != nil
-	cfg.ConsoleWidth = process.ConsoleWidth
-	cfg.ConsoleHeight = process.ConsoleHeight
+	if cgroups.IsCgroup2UnifiedMode() {
+		cfg.Cgroup2Path = c.cgroupManager.Path("")
+	}
+
 	return cfg
 }

@@ -1213,7 +1217,6 @@ func (c *linuxContainer) makeCriuRestoreMountpoints(m *configs.Mount) error {
 	if err := checkProcMount(c.config.Rootfs, dest, ""); err != nil {
 		return err
 	}
-	m.Destination = dest
 	if err := os.MkdirAll(dest, 0755); err != nil {
 		return err
 	}
@@ -1253,13 +1256,16 @@ func (c *linuxContainer) prepareCriuRestoreMounts(mounts []*configs.Mount) error
 	umounts := []string{}
 	defer func() {
 		for _, u := range umounts {
-			if e := unix.Unmount(u, unix.MNT_DETACH); e != nil {
-				if e != unix.EINVAL {
-					// Ignore EINVAL as it means 'target is not a mount point.'
-					// It probably has already been unmounted.
-					logrus.Warnf("Error during cleanup unmounting of %q (%v)", u, e)
+			_ = utils.WithProcfd(c.config.Rootfs, u, func(procfd string) error {
+				if e := unix.Unmount(procfd, unix.MNT_DETACH); e != nil {
+					if e != unix.EINVAL {
+						// Ignore EINVAL as it means 'target is not a mount point.'
+						// It probably has already been unmounted.
+						logrus.Warnf("Error during cleanup unmounting of %s (%s): %v", procfd, u, e)
+					}
 				}
-			}
+				return nil
+			})
 		}
 	}()
 	for _, m := range mounts {
@@ -1277,8 +1283,13 @@ func (c *linuxContainer) prepareCriuRestoreMounts(mounts []*configs.Mount) error
 		// because during initial container creation mounts are
 		// set up in the order they are configured.
 		if m.Device == "bind" {
-			if err := unix.Mount(m.Source, m.Destination, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
-				return errorsf.Wrapf(err, "unable to bind mount %q to %q", m.Source, m.Destination)
+			if err := utils.WithProcfd(c.config.Rootfs, m.Destination, func(procfd string) error {
+				if err := unix.Mount(m.Source, procfd, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
+					return errorsf.Wrapf(err, "unable to bind mount %q to %q (through %q)", m.Source, m.Destination, procfd)
+				}
+				return nil
+			}); err != nil {
+				return err
 			}
 			umounts = append(umounts, m.Destination)
 		}
@@ -1458,7 +1469,7 @@ func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error {
 		return err
 	}

-	if err := c.cgroupManager.Set(c.config); err != nil {
+	if err := c.cgroupManager.Set(c.config.Cgroups.Resources); err != nil {
 		return newSystemError(err)
 	}
@@ -70,6 +70,7 @@ type initConfig struct {
 	RootlessEUID    bool         `json:"rootless_euid,omitempty"`
 	RootlessCgroups bool         `json:"rootless_cgroups,omitempty"`
 	SpecState       *specs.State `json:"spec_state,omitempty"`
+	Cgroup2Path     string       `json:"cgroup2_path,omitempty"`
 }

 type initer interface {
@@ -24,6 +24,7 @@ type Config struct {
 	LogFormat   string
 	LogFilePath string
 	LogPipeFd   int
+	LogCaller   bool
 }

 func ForwardLogs(logPipe io.ReadCloser) chan error {
@@ -77,6 +78,7 @@ func ConfigureLogging(config Config) error {
 	}

 	logrus.SetLevel(config.LogLevel)
+	logrus.SetReportCaller(config.LogCaller)

 	// XXX: while 0 is a valid fd (usually stdin), here we assume
 	// that we never deliberately set LogPipeFd to 0.
@@ -75,12 +75,12 @@
 # define SYS_memfd_create 385
 # elif defined(__aarch64__)
 # define SYS_memfd_create 279
-# elif defined(__ppc__) || defined(__ppc64__)
+# elif defined(__ppc__) || defined(__PPC64__) || defined(__powerpc64__)
 # define SYS_memfd_create 360
 # elif defined(__s390__) || defined(__s390x__)
 # define SYS_memfd_create 350
 # else
-# error "unknown architecture -- cannot hard-code SYS_memfd_create"
+# warning "unknown architecture -- cannot hard-code SYS_memfd_create"
 # endif
 # endif
 #endif
@@ -448,7 +448,7 @@ func (p *initProcess) start() (retErr error) {
 	// call prestart and CreateRuntime hooks
 	if !p.config.Config.Namespaces.Contains(configs.NEWNS) {
 		// Setup cgroup before the hook, so that the prestart and CreateRuntime hook could apply cgroup permissions.
-		if err := p.manager.Set(p.config.Config); err != nil {
+		if err := p.manager.Set(p.config.Config.Cgroups.Resources); err != nil {
 			return newSystemErrorWithCause(err, "setting cgroup config for ready process")
 		}
 		if p.intelRdtManager != nil {
@@ -504,7 +504,7 @@ func (p *initProcess) start() (retErr error) {
 			sentRun = true
 		case procHooks:
 			// Setup cgroup before prestart hook, so that the prestart hook could apply cgroup permissions.
-			if err := p.manager.Set(p.config.Config); err != nil {
+			if err := p.manager.Set(p.config.Config.Cgroups.Resources); err != nil {
 				return newSystemErrorWithCause(err, "setting cgroup config for procHooks process")
 			}
 			if p.intelRdtManager != nil {
@@ -17,6 +17,7 @@ import (
 	"github.com/moby/sys/mountinfo"
 	"github.com/mrunalp/fileutils"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
+	"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
 	"github.com/opencontainers/runc/libcontainer/configs"
 	"github.com/opencontainers/runc/libcontainer/devices"
 	"github.com/opencontainers/runc/libcontainer/userns"
@@ -24,11 +25,20 @@ import (
 	libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )

 const defaultMountFlags = unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV

+type mountConfig struct {
+	root            string
+	label           string
+	cgroup2Path     string
+	rootlessCgroups bool
+	cgroupns        bool
+}
+
 // needsSetupDev returns true if /dev needs to be set up.
 func needsSetupDev(config *configs.Config) bool {
 	for _, m := range config.Mounts {
@@ -48,7 +58,13 @@ func prepareRootfs(pipe io.ReadWriter, iConfig *initConfig) (err error) {
 		return newSystemErrorWithCause(err, "preparing rootfs")
 	}

-	hasCgroupns := config.Namespaces.Contains(configs.NEWCGROUP)
+	mountConfig := &mountConfig{
+		root:            config.Rootfs,
+		label:           config.MountLabel,
+		cgroup2Path:     iConfig.Cgroup2Path,
+		rootlessCgroups: iConfig.RootlessCgroups,
+		cgroupns:        config.Namespaces.Contains(configs.NEWCGROUP),
+	}
 	setupDev := needsSetupDev(config)
 	for _, m := range config.Mounts {
 		for _, precmd := range m.PremountCmds {
@@ -56,7 +72,7 @@ func prepareRootfs(pipe io.ReadWriter, iConfig *initConfig) (err error) {
 				return newSystemErrorWithCause(err, "running premount command")
 			}
 		}
-		if err := mountToRootfs(m, config.Rootfs, config.MountLabel, hasCgroupns); err != nil {
+		if err := mountToRootfs(m, mountConfig); err != nil {
 			return newSystemErrorWithCausef(err, "mounting %q to rootfs at %q", m.Source, m.Destination)
 		}
@@ -213,8 +229,6 @@ func prepareBindMount(m *configs.Mount, rootfs string) error {
 	if err := checkProcMount(rootfs, dest, m.Source); err != nil {
 		return err
 	}
-	// update the mount with the correct dest after symlinks are resolved.
-	m.Destination = dest
 	if err := createIfNotExists(dest, stat.IsDir()); err != nil {
 		return err
 	}
@@ -222,7 +236,7 @@ func prepareBindMount(m *configs.Mount, rootfs string) error {
 	return nil
 }

-func mountCgroupV1(m *configs.Mount, rootfs, mountLabel string, enableCgroupns bool) error {
+func mountCgroupV1(m *configs.Mount, c *mountConfig) error {
 	binds, err := getCgroupMounts(m)
 	if err != nil {
 		return err
@@ -242,31 +256,34 @@ func mountCgroupV1(m *configs.Mount, rootfs, mountLabel string, enableCgroupns bool) error {
 		Data:             "mode=755",
 		PropagationFlags: m.PropagationFlags,
 	}
-	if err := mountToRootfs(tmpfs, rootfs, mountLabel, enableCgroupns); err != nil {
+	if err := mountToRootfs(tmpfs, c); err != nil {
 		return err
 	}
 	for _, b := range binds {
-		if enableCgroupns {
-			subsystemPath := filepath.Join(rootfs, b.Destination)
+		if c.cgroupns {
+			subsystemPath := filepath.Join(c.root, b.Destination)
 			if err := os.MkdirAll(subsystemPath, 0755); err != nil {
 				return err
 			}
-			flags := defaultMountFlags
-			if m.Flags&unix.MS_RDONLY != 0 {
-				flags = flags | unix.MS_RDONLY
-			}
-			cgroupmount := &configs.Mount{
-				Source:      "cgroup",
-				Device:      "cgroup", // this is actually fstype
-				Destination: subsystemPath,
-				Flags:       flags,
-				Data:        filepath.Base(subsystemPath),
-			}
-			if err := mountNewCgroup(cgroupmount); err != nil {
+			if err := utils.WithProcfd(c.root, b.Destination, func(procfd string) error {
+				flags := defaultMountFlags
+				if m.Flags&unix.MS_RDONLY != 0 {
+					flags = flags | unix.MS_RDONLY
+				}
+				var (
+					source = "cgroup"
+					data   = filepath.Base(subsystemPath)
+				)
+				if data == "systemd" {
+					data = cgroups.CgroupNamePrefix + data
+					source = "systemd"
+				}
+				return unix.Mount(source, procfd, "cgroup", uintptr(flags), data)
+			}); err != nil {
 				return err
 			}
 		} else {
-			if err := mountToRootfs(b, rootfs, mountLabel, enableCgroupns); err != nil {
+			if err := mountToRootfs(b, c); err != nil {
 				return err
 			}
 		}
@@ -276,7 +293,7 @@ func mountCgroupV1(m *configs.Mount, c *mountConfig) error {
 		// symlink(2) is very dumb, it will just shove the path into
 		// the link and doesn't do any checks or relative path
 		// conversion. Also, don't error out if the cgroup already exists.
-		if err := os.Symlink(mc, filepath.Join(rootfs, m.Destination, ss)); err != nil && !os.IsExist(err) {
+		if err := os.Symlink(mc, filepath.Join(c.root, m.Destination, ss)); err != nil && !os.IsExist(err) {
 			return err
 		}
 	}
@@ -284,30 +301,87 @@ func mountCgroupV1(m *configs.Mount, c *mountConfig) error {
 	return nil
 }

-func mountCgroupV2(m *configs.Mount, rootfs, mountLabel string, enableCgroupns bool) error {
-	cgroupPath, err := securejoin.SecureJoin(rootfs, m.Destination)
+func mountCgroupV2(m *configs.Mount, c *mountConfig) error {
+	dest, err := securejoin.SecureJoin(c.root, m.Destination)
 	if err != nil {
 		return err
 	}
-	if err := os.MkdirAll(cgroupPath, 0755); err != nil {
+	if err := os.MkdirAll(dest, 0755); err != nil {
 		return err
 	}
-	if err := unix.Mount(m.Source, cgroupPath, "cgroup2", uintptr(m.Flags), m.Data); err != nil {
-		// when we are in UserNS but CgroupNS is not unshared, we cannot mount cgroup2 (#2158)
-		if err == unix.EPERM || err == unix.EBUSY {
-			return unix.Mount("/sys/fs/cgroup", cgroupPath, "", uintptr(m.Flags)|unix.MS_BIND, "")
+	return utils.WithProcfd(c.root, m.Destination, func(procfd string) error {
+		if err := unix.Mount(m.Source, procfd, "cgroup2", uintptr(m.Flags), m.Data); err != nil {
+			// when we are in UserNS but CgroupNS is not unshared, we cannot mount cgroup2 (#2158)
+			if err == unix.EPERM || err == unix.EBUSY {
+				src := fs2.UnifiedMountpoint
+				if c.cgroupns && c.cgroup2Path != "" {
+					// Emulate cgroupns by bind-mounting
+					// the container cgroup path rather than
+					// the whole /sys/fs/cgroup.
+					src = c.cgroup2Path
+				}
+				err = unix.Mount(src, procfd, "", uintptr(m.Flags)|unix.MS_BIND, "")
+				if err == unix.ENOENT && c.rootlessCgroups {
+					err = nil
+				}
+			}
+			return err
 		}
-		return err
-	}
-	return nil
+		return nil
+	})
 }
-func mountToRootfs(m *configs.Mount, rootfs, mountLabel string, enableCgroupns bool) error {
-	var (
-		dest = m.Destination
-	)
-	if !strings.HasPrefix(dest, rootfs) {
-		dest = filepath.Join(rootfs, dest)
+func doTmpfsCopyUp(m *configs.Mount, rootfs, mountLabel string) (Err error) {
+	// Set up a scratch dir for the tmpfs on the host.
+	tmpdir, err := prepareTmp("/tmp")
+	if err != nil {
+		return newSystemErrorWithCause(err, "tmpcopyup: failed to setup tmpdir")
+	}
+	defer cleanupTmp(tmpdir)
+	tmpDir, err := ioutil.TempDir(tmpdir, "runctmpdir")
+	if err != nil {
+		return newSystemErrorWithCause(err, "tmpcopyup: failed to create tmpdir")
+	}
+	defer os.RemoveAll(tmpDir)
+
+	// Configure the *host* tmpdir as if it's the container mount. We change
+	// m.Destination since we are going to mount *on the host*.
+	oldDest := m.Destination
+	m.Destination = tmpDir
+	err = mountPropagate(m, "/", mountLabel)
+	m.Destination = oldDest
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if Err != nil {
+			if err := unix.Unmount(tmpDir, unix.MNT_DETACH); err != nil {
+				logrus.Warnf("tmpcopyup: failed to unmount tmpdir on error: %v", err)
+			}
+		}
+	}()
+
+	return utils.WithProcfd(rootfs, m.Destination, func(procfd string) (Err error) {
+		// Copy the container data to the host tmpdir. We append "/" to force
+		// CopyDirectory to resolve the symlink rather than trying to copy the
+		// symlink itself.
+		if err := fileutils.CopyDirectory(procfd+"/", tmpDir); err != nil {
+			return fmt.Errorf("tmpcopyup: failed to copy %s to %s (%s): %w", m.Destination, procfd, tmpDir, err)
+		}
+		// Now move the mount into the container.
+		if err := unix.Mount(tmpDir, procfd, "", unix.MS_MOVE, ""); err != nil {
+			return fmt.Errorf("tmpcopyup: failed to move mount %s to %s (%s): %w", tmpDir, procfd, m.Destination, err)
+		}
+		return nil
+	})
+}
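doTmpfsCopyUp factors out the tmpcopyup extension: mount the tmpfs on a host scratch dir, copy the image's existing content into it through a verified procfd, then MS_MOVE it into the container. A sketch of a mount that opts into copy-up (the flag choices are illustrative):

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
	"golang.org/x/sys/unix"
)

func main() {
	// tmpfs over /var/cache that is pre-populated with whatever the image
	// already shipped at that path.
	m := &configs.Mount{
		Source:      "tmpfs",
		Device:      "tmpfs",
		Destination: "/var/cache",
		Flags:       unix.MS_NOSUID | unix.MS_NODEV,
		Extensions:  configs.EXT_COPYUP,
	}
	fmt.Println("copy-up requested:", m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP)
}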
+func mountToRootfs(m *configs.Mount, c *mountConfig) error {
+	rootfs := c.root
+	mountLabel := c.label
+	dest, err := securejoin.SecureJoin(rootfs, m.Destination)
+	if err != nil {
+		return err
+	}
+
 	switch m.Device {
@@ -338,53 +412,21 @@ func mountToRootfs(m *configs.Mount, c *mountConfig) error {
 		}
 		return label.SetFileLabel(dest, mountLabel)
 	case "tmpfs":
-		copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP
-		tmpDir := ""
-		// dest might be an absolute symlink, so it needs
-		// to be resolved under rootfs.
-		dest, err := securejoin.SecureJoin(rootfs, m.Destination)
-		if err != nil {
-			return err
-		}
-		m.Destination = dest
 		stat, err := os.Stat(dest)
 		if err != nil {
 			if err := os.MkdirAll(dest, 0755); err != nil {
 				return err
 			}
 		}
-		if copyUp {
-			tmpdir, err := prepareTmp("/tmp")
-			if err != nil {
-				return newSystemErrorWithCause(err, "tmpcopyup: failed to setup tmpdir")
-			}
-			defer cleanupTmp(tmpdir)
-			tmpDir, err = ioutil.TempDir(tmpdir, "runctmpdir")
-			if err != nil {
-				return newSystemErrorWithCause(err, "tmpcopyup: failed to create tmpdir")
-			}
-			defer os.RemoveAll(tmpDir)
-			m.Destination = tmpDir
+
+		if m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP {
+			err = doTmpfsCopyUp(m, rootfs, mountLabel)
+		} else {
+			err = mountPropagate(m, rootfs, mountLabel)
 		}
-		if err := mountPropagate(m, rootfs, mountLabel); err != nil {
+		if err != nil {
 			return err
 		}
-		if copyUp {
-			if err := fileutils.CopyDirectory(dest, tmpDir); err != nil {
-				errMsg := fmt.Errorf("tmpcopyup: failed to copy %s to %s: %v", dest, tmpDir, err)
-				if err1 := unix.Unmount(tmpDir, unix.MNT_DETACH); err1 != nil {
-					return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg)
-				}
-				return errMsg
-			}
-			if err := unix.Mount(tmpDir, dest, "", unix.MS_MOVE, ""); err != nil {
-				errMsg := fmt.Errorf("tmpcopyup: failed to move mount %s to %s: %v", tmpDir, dest, err)
-				if err1 := unix.Unmount(tmpDir, unix.MNT_DETACH); err1 != nil {
-					return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg)
-				}
-				return errMsg
-			}
-		}
 		if stat != nil {
 			if err = os.Chmod(dest, stat.Mode()); err != nil {
 				return err
@@ -424,23 +466,13 @@ func mountToRootfs(m *configs.Mount, c *mountConfig) error {
 		}
 	case "cgroup":
 		if cgroups.IsCgroup2UnifiedMode() {
-			return mountCgroupV2(m, rootfs, mountLabel, enableCgroupns)
+			return mountCgroupV2(m, c)
 		}
-		return mountCgroupV1(m, rootfs, mountLabel, enableCgroupns)
+		return mountCgroupV1(m, c)
 	default:
-		// ensure that the destination of the mount is resolved of symlinks at mount time because
-		// any previous mounts can invalidate the next mount's destination.
-		// this can happen when a user specifies mounts within other mounts to cause breakouts or other
-		// evil stuff to try to escape the container's rootfs.
-		var err error
-		if dest, err = securejoin.SecureJoin(rootfs, m.Destination); err != nil {
-			return err
-		}
 		if err := checkProcMount(rootfs, dest, m.Source); err != nil {
 			return err
 		}
-		// update the mount with the correct dest after symlinks are resolved.
-		m.Destination = dest
 		if err := os.MkdirAll(dest, 0755); err != nil {
 			return err
 		}
|
|||
return nil
|
||||
}
|
||||
|
||||
func bindMountDeviceNode(dest string, node *devices.Device) error {
|
||||
func bindMountDeviceNode(rootfs, dest string, node *devices.Device) error {
|
||||
f, err := os.Create(dest)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
|
@ -631,7 +663,9 @@ func bindMountDeviceNode(dest string, node *devices.Device) error {
|
|||
if f != nil {
|
||||
f.Close()
|
||||
}
|
||||
return unix.Mount(node.Path, dest, "bind", unix.MS_BIND, "")
|
||||
return utils.WithProcfd(rootfs, dest, func(procfd string) error {
|
||||
return unix.Mount(node.Path, procfd, "bind", unix.MS_BIND, "")
|
||||
})
|
||||
}
|
||||
|
||||
// Creates the device node in the rootfs of the container.
|
||||
|
@ -640,18 +674,21 @@ func createDeviceNode(rootfs string, node *devices.Device, bind bool) error {
|
|||
// The node only exists for cgroup reasons, ignore it here.
|
||||
return nil
|
||||
}
|
||||
dest := filepath.Join(rootfs, node.Path)
|
||||
dest, err := securejoin.SecureJoin(rootfs, node.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if bind {
|
||||
return bindMountDeviceNode(dest, node)
|
||||
return bindMountDeviceNode(rootfs, dest, node)
|
||||
}
|
||||
if err := mknodDevice(dest, node); err != nil {
|
||||
if os.IsExist(err) {
|
||||
return nil
|
||||
} else if os.IsPermission(err) {
|
||||
return bindMountDeviceNode(dest, node)
|
||||
return bindMountDeviceNode(rootfs, dest, node)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@@ -931,9 +968,20 @@ func readonlyPath(path string) error {
 		if os.IsNotExist(err) {
 			return nil
 		}
-		return err
+		return &os.PathError{Op: "bind-mount", Path: path, Err: err}
 	}
-	return unix.Mount(path, path, "", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_REC, "")
+
+	var s unix.Statfs_t
+	if err := unix.Statfs(path, &s); err != nil {
+		return &os.PathError{Op: "statfs", Path: path, Err: err}
+	}
+	flags := uintptr(s.Flags) & (unix.MS_NOSUID | unix.MS_NODEV | unix.MS_NOEXEC)
+
+	if err := unix.Mount(path, path, "", flags|unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY, ""); err != nil {
+		return &os.PathError{Op: "bind-mount-ro", Path: path, Err: err}
+	}
+
+	return nil
 }
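The read-only remount above now re-reads the existing mount flags with statfs(2), so it no longer silently drops nosuid/nodev/noexec (and no longer fails with EPERM on kernels that reject remounts that would clear locked flags). A standalone sketch of the same pattern; the path is illustrative and the call needs CAP_SYS_ADMIN plus an existing bind mount at that path:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

// remountReadonlyKeepFlags bind-remounts path read-only while preserving
// whatever nosuid/nodev/noexec flags are already set on it.
func remountReadonlyKeepFlags(path string) error {
	var s unix.Statfs_t
	if err := unix.Statfs(path, &s); err != nil {
		return err
	}
	flags := uintptr(s.Flags) & (unix.MS_NOSUID | unix.MS_NODEV | unix.MS_NOEXEC)
	return unix.Mount(path, path, "", flags|unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY, "")
}

func main() {
	if err := remountReadonlyKeepFlags("/tmp/scratch"); err != nil {
		log.Fatal(err)
	}
}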
 // remountReadonly will remount an existing mount point and ensure that it is read-only.
@@ -987,61 +1035,47 @@ func writeSystemProperty(key, value string) error {
 }

 func remount(m *configs.Mount, rootfs string) error {
-	var (
-		dest = m.Destination
-	)
-	if !strings.HasPrefix(dest, rootfs) {
-		dest = filepath.Join(rootfs, dest)
-	}
-	return unix.Mount(m.Source, dest, m.Device, uintptr(m.Flags|unix.MS_REMOUNT), "")
+	return utils.WithProcfd(rootfs, m.Destination, func(procfd string) error {
+		return unix.Mount(m.Source, procfd, m.Device, uintptr(m.Flags|unix.MS_REMOUNT), "")
+	})
 }

 // Do the mount operation followed by additional mounts required to take care
-// of propagation flags.
+// of propagation flags. This will always be scoped inside the container rootfs.
 func mountPropagate(m *configs.Mount, rootfs string, mountLabel string) error {
 	var (
-		dest  = m.Destination
 		data  = label.FormatMountLabel(m.Data, mountLabel)
 		flags = m.Flags
 	)
-	if libcontainerUtils.CleanPath(dest) == "/dev" {
+	// Delay mounting the filesystem read-only if we need to do further
+	// operations on it. We need to set up files in "/dev" and tmpfs mounts may
+	// need to be chmod-ed after mounting. The mount will be remounted ro later
+	// in finalizeRootfs() if necessary.
+	if libcontainerUtils.CleanPath(m.Destination) == "/dev" || m.Device == "tmpfs" {
 		flags &= ^unix.MS_RDONLY
 	}

-	// Mount it rw to allow chmod operation. A remount will be performed
-	// later to make it ro if set.
-	if m.Device == "tmpfs" {
-		flags &= ^unix.MS_RDONLY
-	}
-
-	copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP
-	if !(copyUp || strings.HasPrefix(dest, rootfs)) {
-		dest = filepath.Join(rootfs, dest)
-	}
-
-	if err := unix.Mount(m.Source, dest, m.Device, uintptr(flags), data); err != nil {
-		return err
+	// Because the destination is inside a container path which might be
+	// mutating underneath us, we verify that we are actually going to mount
+	// inside the container with WithProcfd() -- mounting through a procfd
+	// mounts on the target.
+	if err := utils.WithProcfd(rootfs, m.Destination, func(procfd string) error {
+		return unix.Mount(m.Source, procfd, m.Device, uintptr(flags), data)
+	}); err != nil {
+		return fmt.Errorf("mount through procfd: %w", err)
 	}

-	for _, pflag := range m.PropagationFlags {
-		if err := unix.Mount("", dest, "", uintptr(pflag), ""); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func mountNewCgroup(m *configs.Mount) error {
-	var (
-		data   = m.Data
-		source = m.Source
-	)
-	if data == "systemd" {
-		data = cgroups.CgroupNamePrefix + data
-		source = "systemd"
-	}
-	if err := unix.Mount(source, m.Destination, m.Device, uintptr(m.Flags), data); err != nil {
-		return err
+	// We have to apply mount propagation flags in a separate WithProcfd() call
+	// because the previous call invalidates the passed procfd -- the mount
+	// target needs to be re-opened.
+	if err := utils.WithProcfd(rootfs, m.Destination, func(procfd string) error {
+		for _, pflag := range m.PropagationFlags {
+			if err := unix.Mount("", procfd, "", uintptr(pflag), ""); err != nil {
+				return err
+			}
+		}
+		return nil
+	}); err != nil {
+		return fmt.Errorf("change mount propagation through procfd: %w", err)
 	}
 	return nil
 }
vendor/github.com/opencontainers/runc/libcontainer/seccomp/patchbpf/enosys_linux.go (generated, vendored)
@@ -523,6 +523,11 @@ func assemble(program []bpf.Instruction) ([]unix.SockFilter, error) {
 }

 func generatePatch(config *configs.Seccomp) ([]bpf.Instruction, error) {
+	// Patch the generated cBPF only when there is not a defaultErrnoRet set
+	// and it is different from ENOSYS
+	if config.DefaultErrnoRet != nil && *config.DefaultErrnoRet == uint(retErrnoEnosys) {
+		return nil, nil
+	}
 	// We only add the stub if the default action is not permissive.
 	if isAllowAction(config.DefaultAction) {
 		logrus.Debugf("seccomp: skipping -ENOSYS stub filter generation")
@@ -36,7 +36,7 @@ func InitSeccomp(config *configs.Seccomp) error {
 		return errors.New("cannot initialize Seccomp - nil config passed")
 	}

-	defaultAction, err := getAction(config.DefaultAction, nil)
+	defaultAction, err := getAction(config.DefaultAction, config.DefaultErrnoRet)
 	if err != nil {
 		return errors.New("error initializing seccomp - invalid default action")
 	}
@@ -2,6 +2,7 @@ package specconv

 import (
 	"os"
+	"path/filepath"
 	"strings"

 	"github.com/opencontainers/runc/libcontainer/cgroups"
@@ -200,8 +201,14 @@ func ToRootless(spec *specs.Spec) {
 	// Fix up mounts.
 	var mounts []specs.Mount
 	for _, mount := range spec.Mounts {
-		// Ignore all mounts that are under /sys.
-		if strings.HasPrefix(mount.Destination, "/sys") {
+		// Replace the /sys mount with an rbind.
+		if filepath.Clean(mount.Destination) == "/sys" {
+			mounts = append(mounts, specs.Mount{
+				Source:      "/sys",
+				Destination: "/sys",
+				Type:        "none",
+				Options:     []string{"rbind", "nosuid", "noexec", "nodev", "ro"},
+			})
+			continue
 		}
@@ -216,13 +223,6 @@ func ToRootless(spec *specs.Spec) {
 		mount.Options = options
 		mounts = append(mounts, mount)
 	}
-	// Add the sysfs mount as an rbind.
-	mounts = append(mounts, specs.Mount{
-		Source:      "/sys",
-		Destination: "/sys",
-		Type:        "none",
-		Options:     []string{"rbind", "nosuid", "noexec", "nodev", "ro"},
-	})
 	spec.Mounts = mounts

 	// Remove cgroup settings.
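The injected mount is the standard rootless workaround: mounting a fresh sysfs requires ownership of the network namespace, which a rootless container that keeps the host netns does not have, so /sys is recursively bind-mounted read-only instead. A runnable sketch printing the exact mount ToRootless now injects:

package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Recursive, read-only bind of the host /sys in place of a sysfs mount.
	m := specs.Mount{
		Source:      "/sys",
		Destination: "/sys",
		Type:        "none",
		Options:     []string{"rbind", "nosuid", "noexec", "nodev", "ro"},
	}
	b, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(b))
}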
@@ -238,7 +238,11 @@ func CreateLibcontainerConfig(opts *CreateOpts) (*configs.Config, error) {
 	}

 	for _, m := range spec.Mounts {
-		config.Mounts = append(config.Mounts, createLibcontainerMount(cwd, m))
+		cm, err := createLibcontainerMount(cwd, m)
+		if err != nil {
+			return nil, fmt.Errorf("invalid mount %+v: %w", m, err)
+		}
+		config.Mounts = append(config.Mounts, cm)
 	}

 	defaultDevs, err := createDevices(spec, config)
@@ -327,7 +331,10 @@ func CreateLibcontainerConfig(opts *CreateOpts) (*configs.Config, error) {
 	return config, nil
 }

-func createLibcontainerMount(cwd string, m specs.Mount) *configs.Mount {
+func createLibcontainerMount(cwd string, m specs.Mount) (*configs.Mount, error) {
+	if !filepath.IsAbs(m.Destination) {
+		return nil, fmt.Errorf("mount destination %s not absolute", m.Destination)
+	}
 	flags, pgflags, data, ext := parseMountOptions(m.Options)
 	source := m.Source
 	device := m.Type
@@ -348,7 +355,7 @@ func createLibcontainerMount(cwd string, m specs.Mount) (*configs.Mount, error) {
 		Flags:            flags,
 		PropagationFlags: pgflags,
 		Extensions:       ext,
-	}
+	}, nil
 }

 // systemd property name check: latin letters only, at least 3 of them
@@ -871,6 +878,7 @@ func SetupSeccomp(config *specs.LinuxSeccomp) (*configs.Seccomp, error) {
 		return nil, err
 	}
 	newConfig.DefaultAction = newDefaultAction
+	newConfig.DefaultErrnoRet = config.DefaultErrnoRet

 	// Loop through all syscall blocks and convert them to libcontainer format
 	for _, call := range config.Syscalls {
@@ -3,12 +3,15 @@ package utils
 import (
 	"encoding/binary"
 	"encoding/json"
+	"fmt"
 	"io"
 	"os"
 	"path/filepath"
+	"strconv"
+	"strings"
 	"unsafe"

+	"github.com/cyphar/filepath-securejoin"
 	"golang.org/x/sys/unix"
 )

@@ -88,6 +91,57 @@ func CleanPath(path string) string {
 	return filepath.Clean(path)
 }

+// stripRoot returns the passed path, stripping the root path if it was
+// (lexicially) inside it. Note that both passed paths will always be treated
+// as absolute, and the returned path will also always be absolute. In
+// addition, the paths are cleaned before stripping the root.
+func stripRoot(root, path string) string {
+	// Make the paths clean and absolute.
+	root, path = CleanPath("/"+root), CleanPath("/"+path)
+	switch {
+	case path == root:
+		path = "/"
+	case root == "/":
+		// do nothing
+	case strings.HasPrefix(path, root+"/"):
+		path = strings.TrimPrefix(path, root+"/")
+	}
+	return CleanPath("/" + path)
+}
+
+// WithProcfd runs the passed closure with a procfd path (/proc/self/fd/...)
+// corresponding to the unsafePath resolved within the root. Before passing the
+// fd, this path is verified to have been inside the root -- so operating on it
+// through the passed fdpath should be safe. Do not access this path through
+// the original path strings, and do not attempt to use the pathname outside of
+// the passed closure (the file handle will be freed once the closure returns).
+func WithProcfd(root, unsafePath string, fn func(procfd string) error) error {
+	// Remove the root then forcefully resolve inside the root.
+	unsafePath = stripRoot(root, unsafePath)
+	path, err := securejoin.SecureJoin(root, unsafePath)
+	if err != nil {
+		return fmt.Errorf("resolving path inside rootfs failed: %v", err)
+	}
+
+	// Open the target path.
+	fh, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return fmt.Errorf("open o_path procfd: %w", err)
+	}
+	defer fh.Close()
+
+	// Double-check the path is the one we expected.
+	procfd := "/proc/self/fd/" + strconv.Itoa(int(fh.Fd()))
+	if realpath, err := os.Readlink(procfd); err != nil {
+		return fmt.Errorf("procfd verification failed: %w", err)
+	} else if realpath != path {
+		return fmt.Errorf("possibly malicious path detected -- refusing to operate on %s", realpath)
+	}
+
+	// Run the closure.
+	return fn(procfd)
+}
+
 // SearchLabels searches a list of key-value pairs for the provided key and
 // returns the corresponding value. The pairs must be separated with '='.
 func SearchLabels(labels []string, query string) string {
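WithProcfd is the primitive behind most of the mount changes in this commit (the fix for the symlink-exchange mount race, CVE-2021-30465): resolve the path once, pin the result with an O_PATH fd, verify it via readlink, then operate only through /proc/self/fd/N. A hedged usage sketch; the rootfs and target paths here are illustrative:

package main

import (
	"log"

	"github.com/opencontainers/runc/libcontainer/utils"
	"golang.org/x/sys/unix"
)

func main() {
	rootfs := "/run/mycontainer/rootfs" // illustrative path
	// Even if the container swaps a parent directory for a symlink to the
	// host between resolution and mount, the mount lands on the pinned,
	// verified target rather than following the new symlink.
	err := utils.WithProcfd(rootfs, "/etc/resolv.conf", func(procfd string) error {
		return unix.Mount("/etc/resolv.conf", procfd, "", unix.MS_BIND, "")
	})
	if err != nil {
		log.Fatalf("bind mount failed: %v", err)
	}
}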
@@ -190,6 +190,7 @@ func createLogConfig(context *cli.Context) logs.Config {
 		LogLevel:    logrus.InfoLevel,
 		LogFilePath: logFilePath,
 		LogFormat:   context.GlobalString("log-format"),
+		LogCaller:   context.GlobalBool("debug"),
 	}
 	if context.GlobalBool("debug") {
 		config.LogLevel = logrus.DebugLevel
@@ -121,14 +121,15 @@ using the runc checkpoint command.`,
 }

 func criuOptions(context *cli.Context) *libcontainer.CriuOpts {
-	imagePath := getCheckpointImagePath(context)
-	if err := os.MkdirAll(imagePath, 0755); err != nil {
+	imagePath, parentPath, err := prepareImagePaths(context)
+	if err != nil {
 		fatal(err)
 	}

 	return &libcontainer.CriuOpts{
 		ImagesDirectory:         imagePath,
 		WorkDirectory:           context.String("work-path"),
-		ParentImage:             context.String("parent-path"),
+		ParentImage:             parentPath,
 		LeaveRunning:            context.Bool("leave-running"),
 		TcpEstablished:          context.Bool("tcp-established"),
 		ExternalUnixConnections: context.Bool("ext-unix-sk"),
@@ -78,8 +78,6 @@ type Connection interface {
 	// SetIdleTimeout sets the amount of time the connection may remain idle before
 	// it is automatically closed.
 	SetIdleTimeout(timeout time.Duration)
-	// RemoveStreams can be used to remove a set of streams from the Connection.
-	RemoveStreams(streams ...Stream)
 }

 // Stream represents a bidirectional communications channel that is part of an
@@ -31,7 +31,7 @@ import (
 // streams.
 type connection struct {
 	conn             *spdystream.Connection
-	streams          map[uint32]httpstream.Stream
+	streams          []httpstream.Stream
 	streamLock       sync.Mutex
 	newStreamHandler httpstream.NewStreamHandler
 	ping             func() (time.Duration, error)
@@ -85,12 +85,7 @@ func NewServerConnectionWithPings(conn net.Conn, newStreamHandler httpstream.New
 // will be invoked when the server receives a newly created stream from the
 // client.
 func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration, pingFn func() (time.Duration, error)) httpstream.Connection {
-	c := &connection{
-		conn:             conn,
-		newStreamHandler: newStreamHandler,
-		ping:             pingFn,
-		streams:          make(map[uint32]httpstream.Stream),
-	}
+	c := &connection{conn: conn, newStreamHandler: newStreamHandler, ping: pingFn}
 	go conn.Serve(c.newSpdyStream)
 	if pingPeriod > 0 && pingFn != nil {
 		go c.sendPings(pingPeriod)
@@ -110,7 +105,7 @@ func (c *connection) Close() error {
 		// calling Reset instead of Close ensures that all streams are fully torn down
 		s.Reset()
 	}
-	c.streams = make(map[uint32]httpstream.Stream, 0)
+	c.streams = make([]httpstream.Stream, 0)
 	c.streamLock.Unlock()

 	// now that all streams are fully torn down, it's safe to call close on the underlying connection,
@@ -119,15 +114,6 @@ func (c *connection) Close() error {
 	return c.conn.Close()
 }

-// RemoveStreams can be used to removes a set of streams from the Connection.
-func (c *connection) RemoveStreams(streams ...httpstream.Stream) {
-	c.streamLock.Lock()
-	for _, stream := range streams {
-		delete(c.streams, stream.Identifier())
-	}
-	c.streamLock.Unlock()
-}
-
 // CreateStream creates a new stream with the specified headers and registers
 // it with the connection.
 func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
@@ -147,7 +133,7 @@ func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error
 // it owns.
 func (c *connection) registerStream(s httpstream.Stream) {
 	c.streamLock.Lock()
-	c.streams[s.Identifier()] = s
+	c.streams = append(c.streams, s)
 	c.streamLock.Unlock()
 }
vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go (generated, vendored)
@@ -25,6 +25,8 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 )

+const totalAnnotationSizeLimitB int64 = 256 * (1 << 10) // 256 kB
+
 type lastAppliedUpdater struct {
 	fieldManager Manager
 }
@@ -78,8 +80,8 @@ func hasLastApplied(obj runtime.Object) bool {
 	if annotations == nil {
 		return false
 	}
-	_, ok := annotations[corev1.LastAppliedConfigAnnotation]
-	return ok
+	lastApplied, ok := annotations[corev1.LastAppliedConfigAnnotation]
+	return ok && len(lastApplied) > 0
 }

 func setLastApplied(obj runtime.Object, value string) error {
@@ -92,6 +94,9 @@ func setLastApplied(obj runtime.Object, value string) error {
 		annotations = map[string]string{}
 	}
 	annotations[corev1.LastAppliedConfigAnnotation] = value
+	if isAnnotationsValid(annotations) != nil {
+		delete(annotations, corev1.LastAppliedConfigAnnotation)
+	}
 	accessor.SetAnnotations(annotations)
 	return nil
 }
@@ -115,3 +120,14 @@ func buildLastApplied(obj runtime.Object) (string, error) {
 	}
 	return string(lastApplied), nil
 }
+
+func isAnnotationsValid(annotations map[string]string) error {
+	var totalSize int64
+	for k, v := range annotations {
+		totalSize += (int64)(len(k)) + (int64)(len(v))
+	}
+	if totalSize > (int64)(totalAnnotationSizeLimitB) {
+		return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, totalAnnotationSizeLimitB)
+	}
+	return nil
+}
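The new guard drops the last-applied annotation rather than failing the whole apply when the object's annotations would exceed the 256 KiB metadata limit. A small self-contained sketch of the same size check:

package main

import "fmt"

const totalAnnotationSizeLimitB int64 = 256 * (1 << 10) // 256 kB, mirrors the diff

func annotationsSize(annotations map[string]string) int64 {
	var total int64
	for k, v := range annotations {
		total += int64(len(k)) + int64(len(v))
	}
	return total
}

func main() {
	ann := map[string]string{
		"kubectl.kubernetes.io/last-applied-configuration": string(make([]byte, 300*1024)),
	}
	if annotationsSize(ann) > totalAnnotationSizeLimitB {
		// Same recovery as the diff: drop only the oversized bookkeeping
		// annotation instead of rejecting the object.
		delete(ann, "kubectl.kubernetes.io/last-applied-configuration")
		fmt.Println("dropped oversized last-applied annotation")
	}
}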
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "21"
-	gitVersion = "v1.21.1-k3s1"
-	gitCommit = "2748979665974057d48bc75c1bfe05ae5257300c"
+	gitVersion = "v1.21.2-k3s1"
+	gitCommit = "faffd6ecd237a32315a462a25c1cce1f24a9a506"
 	gitTreeState = "clean"
-	buildDate = "2021-05-14T01:38:27Z"
+	buildDate = "2021-06-18T03:51:40Z"
 )
@@ -216,13 +216,13 @@ var internalPackages = []string{"client-go/tools/cache/"}
 // objects and subsequent deltas.
 // Run will exit when stopCh is closed.
 func (r *Reflector) Run(stopCh <-chan struct{}) {
-	klog.V(2).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+	klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
 	wait.BackoffUntil(func() {
 		if err := r.ListAndWatch(stopCh); err != nil {
 			r.watchErrorHandler(r, err)
 		}
 	}, r.backoffManager, true, stopCh)
-	klog.V(2).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+	klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
 }

 var (
@@ -5,7 +5,7 @@ module k8s.io/cloud-provider
 go 1.16

 require (
-	github.com/google/go-cmp v0.5.2
+	github.com/google/go-cmp v0.5.4
 	github.com/spf13/cobra v1.1.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.6.1
@@ -170,8 +170,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -513,8 +513,8 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
@@ -679,8 +679,8 @@ k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19 h1:0jaDAAxtqIrrqas4vtTqxct4xS5kHfRNycTRLTyJmVM=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8=
 sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "21"
-	gitVersion = "v1.21.1-k3s1"
-	gitCommit = "2748979665974057d48bc75c1bfe05ae5257300c"
+	gitVersion = "v1.21.2-k3s1"
+	gitCommit = "faffd6ecd237a32315a462a25c1cce1f24a9a506"
 	gitTreeState = "clean"
-	buildDate = "2021-05-14T01:38:27Z"
+	buildDate = "2021-06-18T03:51:40Z"
 )
|
|||
klog.Infof("lease controller has nil lease client, will not claim or renew leases")
|
||||
return
|
||||
}
|
||||
wait.Until(c.sync, c.renewInterval, stopCh)
|
||||
wait.JitterUntil(c.sync, c.renewInterval, 0.04, true, stopCh)
|
||||
}
|
||||
|
||||
func (c *controller) sync() {
|
||||
|
|
|
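Switching from wait.Until to wait.JitterUntil spreads lease renewals by up to 4% of the period, so many nodes restarted together do not renew in lockstep. A runnable sketch of the behavior (interval shortened for demonstration):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stop := make(chan struct{})
	go func() {
		time.Sleep(2 * time.Second)
		close(stop)
	}()

	last := time.Now()
	// jitterFactor 0.04 => each period is drawn uniformly from
	// [500ms, 520ms]; sliding=true measures the period after f returns.
	wait.JitterUntil(func() {
		now := time.Now()
		fmt.Printf("tick after %v\n", now.Sub(last))
		last = now
	}, 500*time.Millisecond, 0.04, true, stop)
}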
@@ -47,8 +47,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -142,7 +142,7 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -29,6 +29,7 @@ func watchForLockfileContention(path string, done chan struct{}) error {
 	}
 	if err = watcher.AddWatch(path, inotify.InOpen|inotify.InDeleteSelf); err != nil {
 		klog.ErrorS(err, "Unable to watch lockfile")
+		watcher.Close()
 		return err
 	}
 	go func() {
@@ -39,6 +40,7 @@ func watchForLockfileContention(path string, done chan struct{}) error {
 			klog.ErrorS(err, "inotify watcher error")
 		}
 		close(done)
+		watcher.Close()
 	}()
 	return nil
 }
@@ -95,7 +95,7 @@ func (r *reconciler) reconcile(endpoints *corev1.Endpoints, existingSlices []*di
 			if totalAddressesAdded >= int(r.maxEndpointsPerSubset) {
 				break
 			}
-			if ok := d.addAddress(address, multiKey, true); ok {
+			if ok := d.addAddress(address, multiKey, false); ok {
 				totalAddressesAdded++
 			} else {
 				numInvalidAddresses++
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go (generated, vendored)
@@ -57,6 +57,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/csi"
 	"k8s.io/kubernetes/pkg/volume/csimigration"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -714,17 +715,45 @@ func (adc *attachDetachController) processVolumeAttachments() error {
 			klog.Errorf("Unable to lookup pv object for: %q, err: %v", *pvName, err)
 			continue
 		}
+
+		var plugin volume.AttachableVolumePlugin
 		volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
-		plugin, err := adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
-		if err != nil || plugin == nil {
-			// Currently VA objects are created for CSI volumes only. nil plugin is unexpected, generate a warning
-			klog.Warningf(
-				"Skipping processing the volume %q on nodeName: %q, no attacher interface found. err=%v",
-				*pvName,
-				nodeName,
-				err)
-			continue
+
+		// Consult csiMigratedPluginManager first before querying the plugins registered during runtime in volumePluginMgr.
+		// In-tree plugins that provisioned PVs will not be registered anymore after migration to CSI, once the respective
+		// feature gate is enabled.
+		if inTreePluginName, err := adc.csiMigratedPluginManager.GetInTreePluginNameFromSpec(pv, nil); err == nil {
+			if adc.csiMigratedPluginManager.IsMigrationEnabledForPlugin(inTreePluginName) {
+				// PV is migrated and should be handled by the CSI plugin instead of the in-tree one
+				plugin, _ = adc.volumePluginMgr.FindAttachablePluginByName(csi.CSIPluginName)
+				// podNamespace is not needed here for Azurefile as the volumeName generated will be the same with or without podNamespace
+				volumeSpec, err = csimigration.TranslateInTreeSpecToCSI(volumeSpec, "" /* podNamespace */, adc.intreeToCSITranslator)
+				if err != nil {
+					klog.Errorf(
+						"Failed to translate intree volumeSpec to CSI volumeSpec for volume:%q, va.Name:%q, nodeName:%q: %s. Error: %v",
+						*pvName,
+						va.Name,
+						nodeName,
+						inTreePluginName,
+						err)
+					continue
+				}
+			}
+		}
+
+		if plugin == nil {
+			plugin, err = adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
+			if err != nil || plugin == nil {
+				// Currently VA objects are created for CSI volumes only. nil plugin is unexpected, generate a warning
+				klog.Warningf(
+					"Skipping processing the volume %q on nodeName: %q, no attacher interface found. err=%v",
+					*pvName,
+					nodeName,
+					err)
+				continue
+			}
 		}

 		volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
 		if err != nil {
 			klog.Errorf(
@@ -357,7 +357,7 @@ type subsystem interface {
 	// Name returns the name of the subsystem.
 	Name() string
 	// Set the cgroup represented by cgroup.
-	Set(path string, cgroup *libcontainerconfigs.Cgroup) error
+	Set(path string, cgroup *libcontainerconfigs.Resources) error
 	// GetStats returns the statistics associated with the cgroup
 	GetStats(path string, stats *libcontainercgroups.Stats) error
 }
@@ -374,32 +374,6 @@ func getSupportedSubsystems() map[subsystem]bool {
 	return supportedSubsystems
 }

-// setSupportedSubsystemsV1 sets cgroup resource limits on cgroup v1 only on the supported
-// subsystems. ie. cpu and memory. We don't use libcontainer's cgroup/fs/Set()
-// method as it doesn't allow us to skip updates on the devices cgroup
-// Allowing or denying all devices by writing 'a' to devices.allow or devices.deny is
-// not possible once the device cgroups has children. Once the pod level cgroup are
-// created under the QOS level cgroup we cannot update the QOS level device cgroup.
-// We would like to skip setting any values on the device cgroup in this case
-// but this is not possible with libcontainers Set() method
-// See https://github.com/opencontainers/runc/issues/932
-func setSupportedSubsystemsV1(cgroupConfig *libcontainerconfigs.Cgroup) error {
-	for sys, required := range getSupportedSubsystems() {
-		if _, ok := cgroupConfig.Paths[sys.Name()]; !ok {
-			if required {
-				return fmt.Errorf("failed to find subsystem mount for required subsystem: %v", sys.Name())
-			}
-			// the cgroup is not mounted, but its not required so continue...
-			klog.V(6).InfoS("Unable to find subsystem mount for optional subsystem", "subsystemName", sys.Name())
-			continue
-		}
-		if err := sys.Set(cgroupConfig.Paths[sys.Name()], cgroupConfig); err != nil {
-			return fmt.Errorf("failed to set config for supported subsystems : %v", err)
-		}
-	}
-	return nil
-}
-
 // getCpuWeight converts from the range [2, 262144] to [1, 10000]
 func getCpuWeight(cpuShares *uint64) uint64 {
 	if cpuShares == nil {
@@ -487,42 +461,6 @@ func propagateControllers(path string) error {
 	return nil
 }

-// setResourcesV2 sets cgroup resource limits on cgroup v2
-func setResourcesV2(cgroupConfig *libcontainerconfigs.Cgroup, rootless bool) error {
-	if err := propagateControllers(cgroupConfig.Path); err != nil {
-		return err
-	}
-	if !rootless {
-		cgroupConfig.Resources.Devices = []*libcontainerdevices.Rule{
-			{
-				Type:        'a',
-				Permissions: "rwm",
-				Allow:       true,
-				Minor:       libcontainerdevices.Wildcard,
-				Major:       libcontainerdevices.Wildcard,
-			},
-		}
-	}
-	cgroupConfig.Resources.SkipDevices = true
-
-	// if the hugetlb controller is missing
-	supportedControllers := getSupportedUnifiedControllers()
-	if !supportedControllers.Has("hugetlb") {
-		cgroupConfig.Resources.HugetlbLimit = nil
-		// the cgroup is not present, but its not required so skip it
-		klog.V(6).InfoS("Optional subsystem not supported: hugetlb")
-	}
-
-	manager, err := cgroupfs2.NewManager(cgroupConfig, filepath.Join(cmutil.CgroupRoot, cgroupConfig.Path), rootless)
-	if err != nil {
-		return fmt.Errorf("failed to create cgroup v2 manager: %v", err)
-	}
-	config := &libcontainerconfigs.Config{
-		Cgroups: cgroupConfig,
-	}
-	return manager.Set(config)
-}
-
 func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcontainerconfigs.Resources {
 	resources := &libcontainerconfigs.Resources{
 		Devices: []*libcontainerdevices.Rule{
@@ -602,10 +540,11 @@ func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
 	}

 	unified := libcontainercgroups.IsCgroup2UnifiedMode()
+	var paths map[string]string
 	if unified {
 		libcontainerCgroupConfig.Path = m.Name(cgroupConfig.Name)
 	} else {
-		libcontainerCgroupConfig.Paths = m.buildCgroupPaths(cgroupConfig.Name)
+		paths = m.buildCgroupPaths(cgroupConfig.Name)
 	}

 	// libcontainer consumes a different field and expects a different syntax
@@ -615,20 +554,29 @@ func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
 	}

 	if cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
-		libcontainerCgroupConfig.PidsLimit = *cgroupConfig.ResourceParameters.PidsLimit
+		resources.PidsLimit = *cgroupConfig.ResourceParameters.PidsLimit
 	}

 	if unified {
-		rootless := m.adapter.rootless
-		if err := setResourcesV2(libcontainerCgroupConfig, rootless); err != nil {
-			return fmt.Errorf("failed to set resources for cgroup %v: %v", cgroupConfig.Name, err)
+		if err := propagateControllers(libcontainerCgroupConfig.Path); err != nil {
+			return err
 		}
-	} else {
-		if err := setSupportedSubsystemsV1(libcontainerCgroupConfig); err != nil {
-			return fmt.Errorf("failed to set supported cgroup subsystems for cgroup %v: %v", cgroupConfig.Name, err)
+
+		supportedControllers := getSupportedUnifiedControllers()
+		if !supportedControllers.Has("hugetlb") {
+			resources.HugetlbLimit = nil
+			klog.V(6).InfoS("Optional subsystem not supported: hugetlb")
 		}
+	} else if _, ok := m.subsystems.MountPoints["hugetlb"]; !ok {
+		resources.HugetlbLimit = nil
+		klog.V(6).InfoS("Optional subsystem not supported: hugetlb")
 	}
-	return nil
+
+	manager, err := m.adapter.newManager(libcontainerCgroupConfig, paths)
+	if err != nil {
+		return fmt.Errorf("failed to create cgroup manager: %v", err)
+	}
+	return manager.Set(resources)
 }

 // Create creates the specified cgroup
@@ -558,12 +558,8 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error {
 			return err
 		}

-		manager, err := createManager(cm.KubeletCgroupsName)
-		if err != nil {
-			return err
-		}
 		cont.ensureStateFunc = func(_ cgroups.Manager) error {
-			return ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, manager)
+			return ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, cont.manager)
 		}
 		systemContainers = append(systemContainers, cont)
 	} else {
@ -50,10 +50,7 @@ func (i *internalContainerLifecycleImpl) PreStartContainer(pod *v1.Pod, containe
|
|||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManager) {
|
||||
err := i.topologyManager.AddContainer(pod, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i.topologyManager.AddContainer(pod, container, containerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
5 vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go generated vendored
@ -39,9 +39,8 @@ func (m *fakeManager) AddHintProvider(h HintProvider) {
klog.InfoS("AddHintProvider", "hintProvider", h)
}

func (m *fakeManager) AddContainer(pod *v1.Pod, containerID string) error {
klog.InfoS("AddContainer", "pod", klog.KObj(pod), "containerID", containerID)
return nil
func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
klog.InfoS("AddContainer", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID)
}

func (m *fakeManager) RemoveContainer(containerID string) error {
@ -23,6 +23,7 @@ import (

"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)

@ -43,7 +44,7 @@ type Scope interface {
// wants to be consulted with when making topology hints
AddHintProvider(h HintProvider)
// AddContainer adds pod to Manager for tracking
AddContainer(pod *v1.Pod, containerID string) error
AddContainer(pod *v1.Pod, container *v1.Container, containerID string)
// RemoveContainer removes pod from Manager tracking
RemoveContainer(containerID string) error
// Store is the interface for storing pod topology hints

@ -60,8 +61,8 @@ type scope struct {
hintProviders []HintProvider
// Topology Manager Policy
policy Policy
// Mapping of PodUID to ContainerID for Adding/Removing Pods from PodTopologyHints mapping
podMap map[string]string
// Mapping of (PodUid, ContainerName) to ContainerID for Adding/Removing Pods from PodTopologyHints mapping
podMap containermap.ContainerMap
}

func (s *scope) Name() string {

@ -94,12 +95,11 @@ func (s *scope) AddHintProvider(h HintProvider) {

// It would be better to implement this function in topologymanager instead of scope
// but topologymanager does not track the mapping anymore
func (s *scope) AddContainer(pod *v1.Pod, containerID string) error {
func (s *scope) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
s.mutex.Lock()
defer s.mutex.Unlock()

s.podMap[containerID] = string(pod.UID)
return nil
s.podMap.Add(string(pod.UID), container.Name, containerID)
}

// It would be better to implement this function in topologymanager instead of scope
@ -109,10 +109,18 @@ func (s *scope) RemoveContainer(containerID string) error {
defer s.mutex.Unlock()

klog.InfoS("RemoveContainer", "containerID", containerID)
podUIDString := s.podMap[containerID]
delete(s.podMap, containerID)
if _, exists := s.podTopologyHints[podUIDString]; exists {
delete(s.podTopologyHints[podUIDString], containerID)
// Get the podUID and containerName associated with the containerID to be removed and remove it
podUIDString, containerName, err := s.podMap.GetContainerRef(containerID)
if err != nil {
return nil
}
s.podMap.RemoveByContainerID(containerID)

// In cases where a container has been restarted, it's possible that the same podUID and
// containerName are already associated with a *different* containerID now. Only remove
// the TopologyHints associated with that podUID and containerName if this is not true
if _, err := s.podMap.GetContainerID(podUIDString, containerName); err != nil {
delete(s.podTopologyHints[podUIDString], containerName)
if len(s.podTopologyHints[podUIDString]) == 0 {
delete(s.podTopologyHints, podUIDString)
}
}
@ -19,6 +19,7 @@ package topologymanager
import (
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)

@ -36,7 +37,7 @@ func NewContainerScope(policy Policy) Scope {
name: containerTopologyScope,
podTopologyHints: podTopologyHints{},
policy: policy,
podMap: make(map[string]string),
podMap: containermap.NewContainerMap(),
},
}
}

@ -19,6 +19,7 @@ package topologymanager
import (
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)

@ -36,7 +37,7 @@ func NewPodScope(policy Policy) Scope {
name: podTopologyScope,
podTopologyHints: podTopologyHints{},
policy: policy,
podMap: make(map[string]string),
podMap: containermap.NewContainerMap(),
},
}
}
@ -46,7 +46,7 @@ type Manager interface {
// wants to be consulted with when making topology hints
AddHintProvider(HintProvider)
// AddContainer adds pod to Manager for tracking
AddContainer(pod *v1.Pod, containerID string) error
AddContainer(pod *v1.Pod, container *v1.Container, containerID string)
// RemoveContainer removes pod from Manager tracking
RemoveContainer(containerID string) error
// Store is the interface for storing pod topology hints

@ -175,8 +175,8 @@ func (m *manager) AddHintProvider(h HintProvider) {
m.scope.AddHintProvider(h)
}

func (m *manager) AddContainer(pod *v1.Pod, containerID string) error {
return m.scope.AddContainer(pod, containerID)
func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
m.scope.AddContainer(pod, container, containerID)
}

func (m *manager) RemoveContainer(containerID string) error {
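The topologymanager hunks above thread *v1.Container through AddContainer so hints are tracked per (podUID, containerName) instead of per containerID, which is what makes the restart-aware cleanup in RemoveContainer possible. A minimal sketch of the idea: the method names mirror the vendored containermap API, but the map type here is a simplified illustrative stand-in, not the real package.

```go
package main

import (
	"errors"
	"fmt"
)

// containerMap is a simplified stand-in for
// k8s.io/kubernetes/pkg/kubelet/cm/containermap.ContainerMap:
// it maps containerID -> (podUID, containerName).
type containerMap map[string]struct{ podUID, name string }

func (cm containerMap) Add(podUID, name, containerID string) {
	cm[containerID] = struct{ podUID, name string }{podUID, name}
}

func (cm containerMap) GetContainerRef(containerID string) (string, string, error) {
	ref, ok := cm[containerID]
	if !ok {
		return "", "", errors.New("container not found")
	}
	return ref.podUID, ref.name, nil
}

func (cm containerMap) GetContainerID(podUID, name string) (string, error) {
	for id, ref := range cm {
		if ref.podUID == podUID && ref.name == name {
			return id, nil
		}
	}
	return "", errors.New("container not found")
}

func (cm containerMap) RemoveByContainerID(containerID string) { delete(cm, containerID) }

func main() {
	cm := containerMap{}
	hints := map[string]map[string]string{"pod-1": {"app": "hint-A"}}

	cm.Add("pod-1", "app", "cid-old")
	// The container restarts: the same (podUID, containerName) now points
	// at a new containerID before the old one has been removed.
	cm.Add("pod-1", "app", "cid-new")

	// Removing the *old* containerID must not wipe hints still used by cid-new.
	podUID, name, _ := cm.GetContainerRef("cid-old")
	cm.RemoveByContainerID("cid-old")
	if _, err := cm.GetContainerID(podUID, name); err != nil {
		delete(hints[podUID], name) // only when no live container remains
	}
	fmt.Println(hints) // map[pod-1:map[app:hint-A]] -- hints survive the restart
}
```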
@ -163,10 +163,6 @@ func (h *httpStreamHandler) removeStreamPair(requestID string) {
h.streamPairsLock.Lock()
defer h.streamPairsLock.Unlock()

if h.conn != nil {
pair := h.streamPairs[requestID]
h.conn.RemoveStreams(pair.dataStream, pair.errorStream)
}
delete(h.streamPairs, requestID)
}
@ -81,12 +81,12 @@ func filterEndpointsWithHints(endpoints []Endpoint, hintsAnnotation string, node
}
}

if len(filteredEndpoints) > 0 {
if len(filteredEndpoints) == 0 {
klog.Warningf("Skipping topology aware endpoint filtering since no hints were provided for zone %s", zone)
return filteredEndpoints
return endpoints
}

return endpoints
return filteredEndpoints
}

// deprecatedTopologyFilter returns the appropriate endpoints based on the
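The fix above inverts the condition so that an empty filter result falls back to the full endpoint list instead of returning an empty set. A minimal sketch of the corrected control flow; the `endpoint` type and the zone-matching rule are simplified placeholders, not the proxy's real Endpoint or hint logic.

```go
package main

import "fmt"

// endpoint is a simplified placeholder for the proxy's Endpoint type.
type endpoint struct {
	addr string
	zone string
}

// filterForZone keeps endpoints hinted for the local zone, but falls back
// to the full list when filtering would leave the zone with no endpoints.
func filterForZone(endpoints []endpoint, zone string) []endpoint {
	var filtered []endpoint
	for _, ep := range endpoints {
		if ep.zone == zone {
			filtered = append(filtered, ep)
		}
	}
	if len(filtered) == 0 {
		fmt.Printf("skipping topology aware filtering: no endpoints hinted for zone %s\n", zone)
		return endpoints // never return an empty set while backends exist
	}
	return filtered
}

func main() {
	eps := []endpoint{{"10.0.0.1:80", "us-east-1a"}, {"10.0.0.2:80", "us-east-1b"}}
	fmt.Println(filterForZone(eps, "us-east-1a")) // zone-local subset
	fmt.Println(filterForZone(eps, "us-east-1c")) // falls back to all endpoints
}
```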
@ -354,7 +354,7 @@ func newSourceVIP(hns HostNetworkService, network string, ip string, mac string,

func (ep *endpointsInfo) Cleanup() {
Log(ep, "Endpoint Cleanup", 3)
if ep.refCount != nil {
if !ep.GetIsLocal() && ep.refCount != nil {
*ep.refCount--

// Remove the remote hns endpoint, if no service is referring to it

@ -1157,10 +1157,10 @@ func (proxier *Proxier) syncProxyRules() {
} else {
// We only share the refCounts for remote endpoints
ep.refCount = proxier.endPointsRefCount.getRefCount(newHnsEndpoint.hnsID)
*ep.refCount++
}

ep.hnsID = newHnsEndpoint.hnsID
*ep.refCount++

Log(ep, "Endpoint resource found", 3)
}
34 vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go generated vendored
@ -22,6 +22,7 @@ import (

v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
corelisters "k8s.io/client-go/listers/core/v1"

@ -108,11 +109,11 @@ func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *
}
pvcName := volume.PersistentVolumeClaim.ClaimName
if pvcName == "" {
return framework.NewStatus(framework.Error, "PersistentVolumeClaim had no name")
return framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no name")
}
pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName)
if err != nil {
return framework.AsStatus(err)
if s := getErrorAsStatus(err); !s.IsSuccess() {
return s
}

if pvc == nil {

@ -123,28 +124,27 @@ func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *
if pvName == "" {
scName := storagehelpers.GetPersistentVolumeClaimClass(pvc)
if len(scName) == 0 {
return framework.NewStatus(framework.Error, fmt.Sprint("PersistentVolumeClaim had no pv name and storageClass name"))
return framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no pv name and storageClass name")
}

class, _ := pl.scLister.Get(scName)
if class == nil {
return framework.NewStatus(framework.Error, fmt.Sprintf("StorageClass %q claimed by PersistentVolumeClaim %q not found", scName, pvcName))

class, err := pl.scLister.Get(scName)
if s := getErrorAsStatus(err); !s.IsSuccess() {
return s
}
if class.VolumeBindingMode == nil {
return framework.NewStatus(framework.Error, fmt.Sprintf("VolumeBindingMode not set for StorageClass %q", scName))
return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("VolumeBindingMode not set for StorageClass %q", scName))
}
if *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer {
// Skip unbound volumes
continue
}

return framework.NewStatus(framework.Error, fmt.Sprint("PersistentVolume had no name"))
return framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolume had no name")
}

pv, err := pl.pvLister.Get(pvName)
if err != nil {
return framework.AsStatus(err)
if s := getErrorAsStatus(err); !s.IsSuccess() {
return s
}

if pv == nil {

@ -171,6 +171,16 @@ func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *
return nil
}

func getErrorAsStatus(err error) *framework.Status {
if err != nil {
if errors.IsNotFound(err) {
return framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error())
}
return framework.AsStatus(err)
}
return nil
}

// New initializes a new plugin and returns it.
func New(_ runtime.Object, handle framework.Handle) (framework.Plugin, error) {
informerFactory := handle.SharedInformerFactory()
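The getErrorAsStatus helper introduced above centralizes the mapping from lister errors to scheduler statuses: a NotFound is the pod's own misconfiguration (unschedulable and unresolvable), anything else is an internal error. A minimal self-contained sketch of the same pattern, with the framework's Status reduced to a plain struct and a sentinel error standing in for apimachinery's IsNotFound check:

```go
package main

import (
	"errors"
	"fmt"
)

// status is a simplified stand-in for the scheduler framework's Status.
type status struct {
	code   string
	reason string
}

var errNotFound = errors.New("not found")

// errorAsStatus mirrors the helper's shape: NotFound means the pod references
// an object that does not exist, so retrying on other nodes cannot help;
// any other error is treated as a transient internal failure.
func errorAsStatus(err error) *status {
	if err != nil {
		if errors.Is(err, errNotFound) {
			return &status{"UnschedulableAndUnresolvable", err.Error()}
		}
		return &status{"Error", err.Error()}
	}
	return nil // success
}

func main() {
	fmt.Println(errorAsStatus(fmt.Errorf("pvc %q: %w", "data", errNotFound)))
	fmt.Println(errorAsStatus(errors.New("apiserver timeout")))
	fmt.Println(errorAsStatus(nil))
}
```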
@ -758,6 +758,8 @@ func newUnschedulablePodsMap(metricRecorder metrics.MetricRecorder) *Unschedulab
// may be different than what scheduler has here. We should be able to find pods
// by their UID and update/delete them.
type nominatedPodMap struct {
// podLister is used to verify if the given pod is alive.
podLister listersv1.PodLister
// nominatedPods is a map keyed by a node name and the value is a list of
// pods which are nominated to run on the node. These are pods which can be in
// the activeQ or unschedulableQ.

@ -781,6 +783,15 @@ func (npm *nominatedPodMap) add(pi *framework.PodInfo, nodeName string) {
return
}
}

if npm.podLister != nil {
// If the pod is not alive, don't add it.
if _, err := npm.podLister.Pods(pi.Pod.Namespace).Get(pi.Pod.Name); err != nil {
klog.V(4).InfoS("Pod doesn't exist in podLister, aborting adding it to the nominated map", "pod", klog.KObj(pi.Pod))
return
}
}

npm.nominatedPodToNode[pi.Pod.UID] = nnn
for _, npi := range npm.nominatedPods[nnn] {
if npi.Pod.UID == pi.Pod.UID {

@ -833,8 +844,17 @@ func (npm *nominatedPodMap) UpdateNominatedPod(oldPod *v1.Pod, newPodInfo *frame
}

// NewPodNominator creates a nominatedPodMap as a backing of framework.PodNominator.
// DEPRECATED: use NewSafePodNominator() instead.
func NewPodNominator() framework.PodNominator {
return NewSafePodNominator(nil)
}

// NewSafePodNominator creates a nominatedPodMap as a backing of framework.PodNominator.
// Unlike NewPodNominator, it passes in a podLister so as to check if the pod is alive
// before adding its nominatedNode info.
func NewSafePodNominator(podLister listersv1.PodLister) framework.PodNominator {
return &nominatedPodMap{
podLister: podLister,
nominatedPods: make(map[string][]*framework.PodInfo),
nominatedPodToNode: make(map[ktypes.UID]string),
}
@ -65,6 +65,10 @@ var (
defaultNlbHealthCheckThreshold = int64(3)
defaultHealthCheckPort = "traffic-port"
defaultHealthCheckPath = "/"

// Defaults for ELB Target operations
defaultRegisterTargetsChunkSize = 100
defaultDeregisterTargetsChunkSize = 100
)

func isNLB(annotations map[string]string) bool {

@ -563,6 +567,7 @@ func (c *Cloud) deleteListenerV2(listener *elbv2.Listener) error {
// ensureTargetGroup creates a target group with a set of instances.
func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName types.NamespacedName, mapping nlbPortMapping, instances []string, vpcID string, tags map[string]string) (*elbv2.TargetGroup, error) {
dirty := false
expectedTargets := c.computeTargetGroupExpectedTargets(instances, mapping.TrafficPort)
if targetGroup == nil {
targetType := "instance"
name := c.buildTargetGroupName(serviceName, mapping.FrontendPort, mapping.TrafficPort, mapping.TrafficProtocol, targetType, mapping)
@ -609,86 +614,23 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty
}
}

registerInput := &elbv2.RegisterTargetsInput{
TargetGroupArn: result.TargetGroups[0].TargetGroupArn,
Targets: []*elbv2.TargetDescription{},
tg := result.TargetGroups[0]
tgARN := aws.StringValue(tg.TargetGroupArn)
if err := c.ensureTargetGroupTargets(tgARN, expectedTargets, nil); err != nil {
return nil, err
}
for _, instanceID := range instances {
registerInput.Targets = append(registerInput.Targets, &elbv2.TargetDescription{
Id: aws.String(string(instanceID)),
Port: aws.Int64(mapping.TrafficPort),
})
}

_, err = c.elbv2.RegisterTargets(registerInput)
if err != nil {
return nil, fmt.Errorf("error registering targets for load balancer: %q", err)
}

return result.TargetGroups[0], nil
return tg, nil
}

// handle instances in service
{
healthResponse, err := c.elbv2.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{TargetGroupArn: targetGroup.TargetGroupArn})
tgARN := aws.StringValue(targetGroup.TargetGroupArn)
actualTargets, err := c.obtainTargetGroupActualTargets(tgARN)
if err != nil {
return nil, fmt.Errorf("error describing target group health: %q", err)
return nil, err
}
actualIDs := []string{}
for _, healthDescription := range healthResponse.TargetHealthDescriptions {
if aws.StringValue(healthDescription.TargetHealth.State) == elbv2.TargetHealthStateEnumHealthy {
actualIDs = append(actualIDs, *healthDescription.Target.Id)
} else if healthDescription.TargetHealth.Reason != nil {
switch aws.StringValue(healthDescription.TargetHealth.Reason) {
case elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress:
// We don't need to count this instance in service if it is
// on its way out
default:
actualIDs = append(actualIDs, *healthDescription.Target.Id)
}
}
}

actual := sets.NewString(actualIDs...)
expected := sets.NewString(instances...)

additions := expected.Difference(actual)
removals := actual.Difference(expected)

if len(additions) > 0 {
registerInput := &elbv2.RegisterTargetsInput{
TargetGroupArn: targetGroup.TargetGroupArn,
Targets: []*elbv2.TargetDescription{},
}
for instanceID := range additions {
registerInput.Targets = append(registerInput.Targets, &elbv2.TargetDescription{
Id: aws.String(instanceID),
Port: aws.Int64(mapping.TrafficPort),
})
}
_, err := c.elbv2.RegisterTargets(registerInput)
if err != nil {
return nil, fmt.Errorf("error registering new targets in target group: %q", err)
}
dirty = true
}

if len(removals) > 0 {
deregisterInput := &elbv2.DeregisterTargetsInput{
TargetGroupArn: targetGroup.TargetGroupArn,
Targets: []*elbv2.TargetDescription{},
}
for instanceID := range removals {
deregisterInput.Targets = append(deregisterInput.Targets, &elbv2.TargetDescription{
Id: aws.String(instanceID),
Port: aws.Int64(mapping.TrafficPort),
})
}
_, err := c.elbv2.DeregisterTargets(deregisterInput)
if err != nil {
return nil, fmt.Errorf("error trying to deregister targets in target group: %q", err)
}
dirty = true
if err := c.ensureTargetGroupTargets(tgARN, expectedTargets, actualTargets); err != nil {
return nil, err
}
}

@ -738,6 +680,101 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty
return targetGroup, nil
}

func (c *Cloud) ensureTargetGroupTargets(tgARN string, expectedTargets []*elbv2.TargetDescription, actualTargets []*elbv2.TargetDescription) error {
targetsToRegister, targetsToDeregister := c.diffTargetGroupTargets(expectedTargets, actualTargets)
if len(targetsToRegister) > 0 {
targetsToRegisterChunks := c.chunkTargetDescriptions(targetsToRegister, defaultRegisterTargetsChunkSize)
for _, targetsChunk := range targetsToRegisterChunks {
req := &elbv2.RegisterTargetsInput{
TargetGroupArn: aws.String(tgARN),
Targets: targetsChunk,
}
if _, err := c.elbv2.RegisterTargets(req); err != nil {
return fmt.Errorf("error trying to register targets in target group: %q", err)
}
}
}
if len(targetsToDeregister) > 0 {
targetsToDeregisterChunks := c.chunkTargetDescriptions(targetsToDeregister, defaultDeregisterTargetsChunkSize)
for _, targetsChunk := range targetsToDeregisterChunks {
req := &elbv2.DeregisterTargetsInput{
TargetGroupArn: aws.String(tgARN),
Targets: targetsChunk,
}
if _, err := c.elbv2.DeregisterTargets(req); err != nil {
return fmt.Errorf("error trying to deregister targets in target group: %q", err)
}
}
}
return nil
}

func (c *Cloud) computeTargetGroupExpectedTargets(instanceIDs []string, port int64) []*elbv2.TargetDescription {
expectedTargets := make([]*elbv2.TargetDescription, 0, len(instanceIDs))
for _, instanceID := range instanceIDs {
expectedTargets = append(expectedTargets, &elbv2.TargetDescription{
Id: aws.String(instanceID),
Port: aws.Int64(port),
})
}
return expectedTargets
}

func (c *Cloud) obtainTargetGroupActualTargets(tgARN string) ([]*elbv2.TargetDescription, error) {
req := &elbv2.DescribeTargetHealthInput{
TargetGroupArn: aws.String(tgARN),
}
resp, err := c.elbv2.DescribeTargetHealth(req)
if err != nil {
return nil, fmt.Errorf("error describing target group health: %q", err)
}
actualTargets := make([]*elbv2.TargetDescription, 0, len(resp.TargetHealthDescriptions))
for _, targetDesc := range resp.TargetHealthDescriptions {
if targetDesc.TargetHealth.Reason != nil && aws.StringValue(targetDesc.TargetHealth.Reason) == elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress {
continue
}
actualTargets = append(actualTargets, targetDesc.Target)
}
return actualTargets, nil
}

// diffTargetGroupTargets computes the targets to register and targets to deregister based on existingTargets and desired instances.
func (c *Cloud) diffTargetGroupTargets(expectedTargets []*elbv2.TargetDescription, actualTargets []*elbv2.TargetDescription) (targetsToRegister []*elbv2.TargetDescription, targetsToDeregister []*elbv2.TargetDescription) {
expectedTargetsByUID := make(map[string]*elbv2.TargetDescription, len(expectedTargets))
for _, target := range expectedTargets {
targetUID := fmt.Sprintf("%v:%v", aws.StringValue(target.Id), aws.Int64Value(target.Port))
expectedTargetsByUID[targetUID] = target
}
actualTargetsByUID := make(map[string]*elbv2.TargetDescription, len(actualTargets))
for _, target := range actualTargets {
targetUID := fmt.Sprintf("%v:%v", aws.StringValue(target.Id), aws.Int64Value(target.Port))
actualTargetsByUID[targetUID] = target
}

expectedTargetsUIDs := sets.StringKeySet(expectedTargetsByUID)
actualTargetsUIDs := sets.StringKeySet(actualTargetsByUID)
for _, targetUID := range expectedTargetsUIDs.Difference(actualTargetsUIDs).List() {
targetsToRegister = append(targetsToRegister, expectedTargetsByUID[targetUID])
}
for _, targetUID := range actualTargetsUIDs.Difference(expectedTargetsUIDs).List() {
targetsToDeregister = append(targetsToDeregister, actualTargetsByUID[targetUID])
}
return targetsToRegister, targetsToDeregister
}

// chunkTargetDescriptions will split slice of TargetDescription into chunks
func (c *Cloud) chunkTargetDescriptions(targets []*elbv2.TargetDescription, chunkSize int) [][]*elbv2.TargetDescription {
var chunks [][]*elbv2.TargetDescription
for i := 0; i < len(targets); i += chunkSize {
end := i + chunkSize
if end > len(targets) {
end = len(targets)
}
chunks = append(chunks, targets[i:end])
}
return chunks
}

// updateInstanceSecurityGroupsForNLB will adjust securityGroup's settings to allow inbound traffic into instances from clientCIDRs and portMappings.
// TIP: if either instances or clientCIDRs or portMappings are nil, then the securityGroup rules for lbName are cleared.
func (c *Cloud) updateInstanceSecurityGroupsForNLB(lbName string, instances map[InstanceID]*ec2.Instance, subnetCIDRs []string, clientCIDRs []string, portMappings []nlbPortMapping) error {
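The AWS helpers above reduce target reconciliation to a set difference keyed by (instanceID, port), applied in API-sized chunks. A minimal self-contained sketch of the same diff-and-chunk approach, using a plain struct in place of the AWS SDK's TargetDescription:

```go
package main

import "fmt"

// target is a simplified stand-in for elbv2.TargetDescription.
type target struct {
	ID   string
	Port int64
}

func uid(t target) string { return fmt.Sprintf("%s:%d", t.ID, t.Port) }

// diffTargets returns what must be registered and deregistered so that the
// actual set converges to the expected set.
func diffTargets(expected, actual []target) (register, deregister []target) {
	expectedByUID := map[string]target{}
	for _, t := range expected {
		expectedByUID[uid(t)] = t
	}
	actualByUID := map[string]target{}
	for _, t := range actual {
		actualByUID[uid(t)] = t
	}
	for k, t := range expectedByUID {
		if _, ok := actualByUID[k]; !ok {
			register = append(register, t)
		}
	}
	for k, t := range actualByUID {
		if _, ok := expectedByUID[k]; !ok {
			deregister = append(deregister, t)
		}
	}
	return register, deregister
}

// chunk splits targets into slices of at most chunkSize, mirroring the
// per-call limit the patch adopts (100 for Register/DeregisterTargets).
func chunk(targets []target, chunkSize int) [][]target {
	var chunks [][]target
	for i := 0; i < len(targets); i += chunkSize {
		end := i + chunkSize
		if end > len(targets) {
			end = len(targets)
		}
		chunks = append(chunks, targets[i:end])
	}
	return chunks
}

func main() {
	expected := []target{{"i-1", 80}, {"i-2", 80}, {"i-3", 80}}
	actual := []target{{"i-2", 80}, {"i-4", 80}}
	reg, dereg := diffTargets(expected, actual)
	fmt.Println("register:", reg, "deregister:", dereg)
	fmt.Println("chunks of 2:", chunk(reg, 2))
}
```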
@ -2238,9 +2238,11 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa
}
dirtyPIP = true
}
changed := az.ensurePIPTagged(service, &pip)
if changed {
dirtyPIP = true
if !isUserAssignedPIP {
changed := az.ensurePIPTagged(service, &pip)
if changed {
dirtyPIP = true
}
}
if shouldReleaseExistingOwnedPublicIP(&pip, wantLb, isInternal, isUserAssignedPIP, desiredPipName, serviceIPTagRequest) {
// Then, release the public ip
@ -365,9 +365,13 @@ func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, serv
klog.Warningf("serviceOwnsFrontendIP: unexpected error when finding match public IP of the service %s with loadBalancerIP %s: %v", service.Name, loadBalancerIP, err)
return false, isPrimaryService, nil
}

if pip != nil && pip.ID != nil && pip.PublicIPAddressPropertiesFormat != nil && pip.IPAddress != nil {
if strings.EqualFold(*pip.ID, *fip.PublicIPAddress.ID) {
if pip != nil &&
pip.ID != nil &&
pip.PublicIPAddressPropertiesFormat != nil &&
pip.IPAddress != nil &&
fip.FrontendIPConfigurationPropertiesFormat != nil &&
fip.FrontendIPConfigurationPropertiesFormat.PublicIPAddress != nil {
if strings.EqualFold(to.String(pip.ID), to.String(fip.PublicIPAddress.ID)) {
klog.V(4).Infof("serviceOwnsFrontendIP: found secondary service %s of the frontend IP config %s", service.Name, *fip.Name)

return true, isPrimaryService, nil
@ -1033,7 +1033,6 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam
})
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
newVM := &compute.VirtualMachineScaleSetVM{
Sku: vm.Sku,
Location: vm.Location,
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
HardwareProfile: vm.HardwareProfile,

@ -1175,7 +1174,6 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
})
primaryIPConfig.LoadBalancerBackendAddressPools = &loadBalancerBackendAddressPools
newVMSS := compute.VirtualMachineScaleSet{
Sku: vmss.Sku,
Location: vmss.Location,
VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{

@ -1356,7 +1354,6 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID str
// Compose a new vmssVM with added backendPoolID.
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
newVM := &compute.VirtualMachineScaleSetVM{
Sku: vm.Sku,
Location: vm.Location,
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
HardwareProfile: vm.HardwareProfile,

@ -1478,7 +1475,6 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
// Compose a new vmss with added backendPoolID.
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
newVMSS := compute.VirtualMachineScaleSet{
Sku: vmss.Sku,
Location: vmss.Location,
VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{

@ -193,6 +193,11 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cac
}

computerName := strings.ToLower(*vm.OsProfile.ComputerName)
if vm.NetworkProfile == nil || vm.NetworkProfile.NetworkInterfaces == nil {
klog.Warningf("skip caching vmssVM %s since its network profile hasn't initialized yet (probably still under creating)", computerName)
continue
}

vmssVMCacheEntry := &vmssVirtualMachinesEntry{
resourceGroup: resourceGroupName,
vmssName: vmssName,
@ -820,7 +820,7 @@ github.com/opencontainers/go-digest/digestset
github.com/opencontainers/image-spec/identity
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runc v1.0.0-rc93 => github.com/opencontainers/runc v1.0.0-rc93.0.20210414171415-3397a09ee932
# github.com/opencontainers/runc v1.0.0-rc95 => github.com/opencontainers/runc v1.0.0-rc95
## explicit
github.com/opencontainers/runc
github.com/opencontainers/runc/contrib/cmd/recvtty

@ -1238,7 +1238,7 @@ golang.org/x/oauth2/jwt
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
golang.org/x/sync/singleflight
# golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 => golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073
# golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 => golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/execabs

@ -1444,7 +1444,7 @@ gopkg.in/warnings.v0
gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
gopkg.in/yaml.v3
# k8s.io/api v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1
# k8s.io/api v0.21.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.2-k3s1
## explicit
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1

@ -1492,7 +1492,7 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.2-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install

@ -1532,7 +1532,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1
# k8s.io/apimachinery v0.21.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.2-k3s1
## explicit
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors

@ -1597,7 +1597,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1
# k8s.io/apiserver v0.21.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.2-k3s1
## explicit
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration

@ -1736,11 +1736,11 @@ k8s.io/apiserver/plugin/pkg/audit/webhook
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.2-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.2-k3s1
## explicit
k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1

@ -2028,7 +2028,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1
# k8s.io/cloud-provider v0.21.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.2-k3s1
## explicit
k8s.io/cloud-provider
k8s.io/cloud-provider/api

@ -2050,13 +2050,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.2-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.19.7 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1
# k8s.io/code-generator v0.19.7 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.2-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake

@ -2071,7 +2071,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1
# k8s.io/component-base v0.21.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.2-k3s1
## explicit
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag

@ -2097,7 +2097,7 @@ k8s.io/component-base/metrics/testutil
k8s.io/component-base/term
k8s.io/component-base/version
k8s.io/component-base/version/verflag
# k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1
# k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.2-k3s1
k8s.io/component-helpers/apimachinery/lease
k8s.io/component-helpers/apps/poddisruptionbudget
k8s.io/component-helpers/auth/rbac/reconciliation

@ -2106,7 +2106,7 @@ k8s.io/component-helpers/node/topology
k8s.io/component-helpers/scheduling/corev1
k8s.io/component-helpers/scheduling/corev1/nodeaffinity
k8s.io/component-helpers/storage/volume
# k8s.io/controller-manager v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1
# k8s.io/controller-manager v0.21.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.2-k3s1
## explicit
k8s.io/controller-manager/app
k8s.io/controller-manager/config

@ -2119,11 +2119,11 @@ k8s.io/controller-manager/pkg/informerfactory
k8s.io/controller-manager/pkg/leadermigration
k8s.io/controller-manager/pkg/leadermigration/config
k8s.io/controller-manager/pkg/leadermigration/options
# k8s.io/cri-api v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1
# k8s.io/cri-api v0.21.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.2-k3s1
## explicit
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.2-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027
@ -2141,7 +2141,7 @@ k8s.io/heapster/metrics/api/v1/types
k8s.io/klog
# k8s.io/klog/v2 v2.8.0
k8s.io/klog/v2
# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1
# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.2-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1

@ -2169,7 +2169,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.2-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
k8s.io/kube-openapi/pkg/aggregator

@ -2185,13 +2185,13 @@ k8s.io/kube-openapi/pkg/validation/spec
k8s.io/kube-openapi/pkg/validation/strfmt
k8s.io/kube-openapi/pkg/validation/strfmt/bson
k8s.io/kube-openapi/pkg/validation/validate
# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.2-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.2-k3s1
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1beta1
k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.21.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1
# k8s.io/kubectl v0.21.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.2-k3s1
## explicit
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd

@ -2270,7 +2270,7 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1
# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.2-k3s1
k8s.io/kubelet/config/v1alpha1
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis

@ -2282,7 +2282,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1
k8s.io/kubelet/pkg/apis/podresources/v1
k8s.io/kubelet/pkg/apis/podresources/v1alpha1
k8s.io/kubelet/pkg/apis/stats/v1alpha1
# k8s.io/kubernetes v1.21.1 => github.com/k3s-io/kubernetes v1.21.1-k3s1
# k8s.io/kubernetes v1.21.2 => github.com/k3s-io/kubernetes v1.21.2-k3s1
## explicit
k8s.io/kubernetes/cmd/kube-apiserver/app
k8s.io/kubernetes/cmd/kube-apiserver/app/options

@ -3010,7 +3010,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.2-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth

@ -3053,7 +3053,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1
# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.2-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2

@ -3069,7 +3069,7 @@ k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1
k8s.io/metrics/pkg/client/custom_metrics
k8s.io/metrics/pkg/client/custom_metrics/scheme
k8s.io/metrics/pkg/client/external_metrics
# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1
# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.2-k3s1
k8s.io/mount-utils
# k8s.io/utils v0.0.0-20201110183641-67b214c5f920
## explicit

@ -3088,7 +3088,7 @@ k8s.io/utils/path
k8s.io/utils/pointer
k8s.io/utils/strings
k8s.io/utils/trace
# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15
# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
# sigs.k8s.io/kustomize/api v0.8.8

@ -3196,7 +3196,7 @@ sigs.k8s.io/yaml
# github.com/juju/errors => github.com/k3s-io/nocode v0.0.0-20200630202308-cb097102c09f
# github.com/kubernetes-sigs/cri-tools => github.com/k3s-io/cri-tools v1.21.0-k3s1
# github.com/matryer/moq => github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009
# github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc93.0.20210414171415-3397a09ee932
# github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc95
# github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.0.3-0.20210316141917-a8c4a9ee0f6b
# github.com/rancher/k3s/pkg/data => ./pkg/data
# go.etcd.io/etcd => github.com/k3s-io/etcd v0.5.0-alpha.5.0.20201208200253-50621aee4aea

@ -3206,32 +3206,32 @@ sigs.k8s.io/yaml
# google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884
# google.golang.org/grpc => google.golang.org/grpc v1.27.1
# gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.1-k3s1
# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.1-k3s1
# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.1-k3s1
# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.1-k3s1
# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.1-k3s1
# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.1-k3s1
# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.1-k3s1
# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.1-k3s1
# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.1-k3s1
# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.1-k3s1
# k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.1-k3s1
# k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.1-k3s1
# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.1-k3s1
# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.1-k3s1
# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.1-k3s1
# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.1-k3s1
# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.1-k3s1
# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.1-k3s1
# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.1-k3s1
# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.1-k3s1
# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.21.1-k3s1
# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.1-k3s1
# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.1-k3s1
# k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.1-k3s1
# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.21.1-k3s1
# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.1-k3s1
# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.21.1-k3s1
# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.21.1-k3s1
# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.21.2-k3s1
# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.21.2-k3s1
# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.21.2-k3s1
# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.21.2-k3s1
# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.21.2-k3s1
# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.21.2-k3s1
# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.21.2-k3s1
# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.21.2-k3s1
# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.21.2-k3s1
# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.21.2-k3s1
# k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.21.2-k3s1
# k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.21.2-k3s1
# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.21.2-k3s1
# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.21.2-k3s1
# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.21.2-k3s1
# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.21.2-k3s1
# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.21.2-k3s1
# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.21.2-k3s1
# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.21.2-k3s1
# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.21.2-k3s1
# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.21.2-k3s1
# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.21.2-k3s1
# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.21.2-k3s1
# k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.2-k3s1
# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.21.2-k3s1
# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.21.2-k3s1
# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.21.2-k3s1
# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.21.2-k3s1
# mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7
36 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go generated vendored
@ -19,6 +19,7 @@ package client
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"net"

@ -49,6 +50,10 @@ type grpcTunnel struct {
conns map[int64]*conn
pendingDialLock sync.RWMutex
connsLock sync.RWMutex

// The tunnel will be closed if the caller fails to read via conn.Read()
// more than readTimeoutSeconds after a packet has been received.
readTimeoutSeconds int
}

type clientConn interface {

@ -75,9 +80,10 @@ func CreateSingleUseGrpcTunnel(address string, opts ...grpc.DialOption) (Tunnel,
}

tunnel := &grpcTunnel{
stream: stream,
pendingDial: make(map[int64]chan<- dialResult),
conns: make(map[int64]*conn),
stream: stream,
pendingDial: make(map[int64]chan<- dialResult),
conns: make(map[int64]*conn),
readTimeoutSeconds: 10,
}

go tunnel.serve(c)

@ -110,10 +116,17 @@ func (t *grpcTunnel) serve(c clientConn) {
if !ok {
klog.V(1).Infoln("DialResp not recognized; dropped")
} else {
ch <- dialResult{
result := dialResult{
err: resp.Error,
connid: resp.ConnectID,
}
select {
case ch <- result:
default:
klog.ErrorS(fmt.Errorf("blocked pending channel"), "Received second dial response for connection request", "connectionID", resp.ConnectID, "dialID", resp.Random)
// On multiple dial responses, avoid leaking serve goroutine.
return
}
}

if resp.Error != "" {

@ -129,7 +142,14 @@ func (t *grpcTunnel) serve(c clientConn) {
t.connsLock.RUnlock()

if ok {
conn.readCh <- resp.Data
timer := time.NewTimer((time.Duration)(t.readTimeoutSeconds) * time.Second)
select {
case conn.readCh <- resp.Data:
timer.Stop()
case <-timer.C:
klog.ErrorS(fmt.Errorf("timeout"), "readTimeout has been reached, the grpc connection to the proxy server will be closed", "connectionID", conn.connID, "readTimeoutSeconds", t.readTimeoutSeconds)
return
}
} else {
klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID)
}

@ -160,8 +180,8 @@ func (t *grpcTunnel) Dial(protocol, address string) (net.Conn, error) {
return nil, errors.New("protocol not supported")
}

random := rand.Int63()
resCh := make(chan dialResult)
random := rand.Int63() /* #nosec G404 */
resCh := make(chan dialResult, 1)
t.pendingDialLock.Lock()
t.pendingDial[random] = resCh
t.pendingDialLock.Unlock()

@ -199,7 +219,7 @@ func (t *grpcTunnel) Dial(protocol, address string) (net.Conn, error) {
}
c.connID = res.connid
c.readCh = make(chan []byte, 10)
c.closeCh = make(chan string)
c.closeCh = make(chan string, 1)
t.connsLock.Lock()
t.conns[res.connid] = c
t.connsLock.Unlock()
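Both konnectivity-client fixes above replace unconditional channel sends with bounded ones: a buffered channel plus select/default for duplicate dial responses, and a timer-guarded select for data delivery so a stalled reader can no longer wedge the serve goroutine. A minimal self-contained sketch of the two patterns (the channel names and timeout are illustrative, not the vendored values):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Pattern 1: buffered channel + select/default. The first send wins;
	// a duplicate response is detected instead of blocking forever.
	resCh := make(chan string, 1)
	for _, resp := range []string{"dial-resp-1", "dial-resp-2 (duplicate)"} {
		select {
		case resCh <- resp:
		default:
			fmt.Println("blocked pending channel, dropping:", resp)
		}
	}
	fmt.Println("received:", <-resCh)

	// Pattern 2: timer-guarded send. If the reader does not drain readCh
	// within the timeout, give up rather than wedging this goroutine.
	readCh := make(chan []byte) // unbuffered and never read: simulates a stalled reader
	timer := time.NewTimer(100 * time.Millisecond)
	select {
	case readCh <- []byte("payload"):
		timer.Stop()
	case <-timer.C:
		fmt.Println("readTimeout reached, closing connection")
	}
}
```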