mirror of https://github.com/k3s-io/k3s
Add Annotations from the deviceplugin to the runtime
parent eb5035b08d
commit db537e5954
@@ -619,6 +619,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container
 	opts.Devices = append(opts.Devices, devOpts.Devices...)
 	opts.Mounts = append(opts.Mounts, devOpts.Mounts...)
 	opts.Envs = append(opts.Envs, devOpts.Envs...)
+	opts.Annotations = append(opts.Annotations, devOpts.Annotations...)
 	return opts, nil
 }
 
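For context on the flow this hunk completes: each device plugin's per-container allocation is collected into a DeviceRunContainerOptions (see the hunks below), and GetResources appends those onto the kubelet's RunContainerOptions, so Annotations now travel the same path as Devices, Mounts, and Envs. A minimal standalone Go sketch of that append step, using simplified stand-in types rather than the actual kubelet structs:

package main

import "fmt"

// Simplified stand-ins for the kubelet types this commit touches.
type Annotation struct{ Name, Value string }

type DeviceRunContainerOptions struct {
	Annotations []Annotation // contributed by a device plugin
}

type RunContainerOptions struct {
	Annotations []Annotation // what the runtime will receive
}

func main() {
	devOpts := DeviceRunContainerOptions{
		// Hypothetical annotation key; real plugins choose their own.
		Annotations: []Annotation{{Name: "com.example/hint", Value: "isolated"}},
	}
	opts := RunContainerOptions{}
	// Mirrors the line added to GetResources above.
	opts.Annotations = append(opts.Annotations, devOpts.Annotations...)
	fmt.Println(opts.Annotations) // [{com.example/hint isolated}]
}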
@@ -191,6 +191,7 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions
 	devsMap := make(map[string]string)
 	mountsMap := make(map[string]string)
 	envsMap := make(map[string]string)
+	annotationsMap := make(map[string]string)
 	// Loops through AllocationResponses of all cached device resources.
 	for _, devices := range resources {
 		resp := devices.allocResp
@@ -198,17 +199,18 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions
 		// Environment variables
 		// Mount points
 		// Device files
+		// Container annotations
 		// These artifacts are per resource per container.
 		// Updates RunContainerOptions.Envs.
 		for k, v := range resp.Envs {
 			if e, ok := envsMap[k]; ok {
-				glog.V(3).Infof("skip existing env %s %s", k, v)
+				glog.V(4).Infof("Skip existing env %s %s", k, v)
 				if e != v {
 					glog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v)
 				}
 				continue
 			}
-			glog.V(4).Infof("add env %s %s", k, v)
+			glog.V(4).Infof("Add env %s %s", k, v)
 			envsMap[k] = v
 			opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v})
 		}
@@ -216,14 +218,14 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions
 		// Updates RunContainerOptions.Devices.
 		for _, dev := range resp.Devices {
 			if d, ok := devsMap[dev.ContainerPath]; ok {
-				glog.V(3).Infof("skip existing device %s %s", dev.ContainerPath, dev.HostPath)
+				glog.V(4).Infof("Skip existing device %s %s", dev.ContainerPath, dev.HostPath)
 				if d != dev.HostPath {
 					glog.Errorf("Container device %s has conflicting mapping host devices: %s and %s",
 						dev.ContainerPath, d, dev.HostPath)
 				}
 				continue
 			}
-			glog.V(4).Infof("add device %s %s", dev.ContainerPath, dev.HostPath)
+			glog.V(4).Infof("Add device %s %s", dev.ContainerPath, dev.HostPath)
 			devsMap[dev.ContainerPath] = dev.HostPath
 			opts.Devices = append(opts.Devices, kubecontainer.DeviceInfo{
 				PathOnHost: dev.HostPath,
@@ -231,17 +233,18 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions
 				Permissions: dev.Permissions,
 			})
 		}
 
 		// Updates RunContainerOptions.Mounts.
 		for _, mount := range resp.Mounts {
 			if m, ok := mountsMap[mount.ContainerPath]; ok {
-				glog.V(3).Infof("skip existing mount %s %s", mount.ContainerPath, mount.HostPath)
+				glog.V(4).Infof("Skip existing mount %s %s", mount.ContainerPath, mount.HostPath)
 				if m != mount.HostPath {
 					glog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s",
 						mount.ContainerPath, m, mount.HostPath)
 				}
 				continue
 			}
-			glog.V(4).Infof("add mount %s %s", mount.ContainerPath, mount.HostPath)
+			glog.V(4).Infof("Add mount %s %s", mount.ContainerPath, mount.HostPath)
 			mountsMap[mount.ContainerPath] = mount.HostPath
 			opts.Mounts = append(opts.Mounts, kubecontainer.Mount{
 				Name: mount.ContainerPath,
@@ -252,6 +255,20 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions
 				SELinuxRelabel: false,
 			})
 		}
+
+		// Updates for Annotations
+		for k, v := range resp.Annotations {
+			if e, ok := annotationsMap[k]; ok {
+				glog.V(4).Infof("Skip existing annotation %s %s", k, v)
+				if e != v {
+					glog.Errorf("Annotation %s has conflicting setting: %s and %s", k, e, v)
+				}
+				continue
+			}
+			glog.V(4).Infof("Add annotation %s %s", k, v)
+			annotationsMap[k] = v
+			opts.Annotations = append(opts.Annotations, kubecontainer.Annotation{Name: k, Value: v})
+		}
 	}
 	return opts
 }
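The conflict handling in deviceRunContainerOptions is first-writer-wins across the cached AllocateResponses: a key already present in annotationsMap keeps its value, and a differing duplicate is only logged as an error, never applied. A standalone sketch of that merge rule, with plain log in place of glog and hypothetical annotation keys:

package main

import (
	"fmt"
	"log"
)

// mergeAnnotations reproduces the rule in the hunk above: the first
// resource to set a key wins; a conflicting later value is logged and dropped.
func mergeAnnotations(responses []map[string]string) map[string]string {
	merged := make(map[string]string)
	for _, resp := range responses {
		for k, v := range resp {
			if e, ok := merged[k]; ok {
				if e != v {
					log.Printf("annotation %s has conflicting setting: %s and %s", k, e, v)
				}
				continue // keep the existing value
			}
			merged[k] = v
		}
	}
	return merged
}

func main() {
	a := map[string]string{"com.example/iommu-group": "7"} // hypothetical keys
	b := map[string]string{"com.example/iommu-group": "9"} // conflicts with a; dropped
	fmt.Println(mergeAnnotations([]map[string]string{a, b})) // map[com.example/iommu-group:7]
}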
@@ -66,6 +66,8 @@ type DeviceRunContainerOptions struct {
 	Mounts []kubecontainer.Mount
 	// The host devices mapped into the container.
 	Devices []kubecontainer.DeviceInfo
+	// The Annotations for the container
+	Annotations []kubecontainer.Annotation
 }
 
 // TODO: evaluate whether we need these error definitions.
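The new Annotations field is the bridge between the device plugin's map[string]string response and the slice the runtime options carry; the pod_devices hunk above converts each key/value pair with kubecontainer.Annotation{Name: k, Value: v}. A standalone sketch of that conversion (sorting added only to make the output deterministic):

package main

import (
	"fmt"
	"sort"
)

type Annotation struct{ Name, Value string }

// annotationsFromMap mirrors the per-key conversion in
// deviceRunContainerOptions: one map entry becomes one Annotation.
func annotationsFromMap(m map[string]string) []Annotation {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // map iteration order is random in Go
	out := make([]Annotation, 0, len(m))
	for _, k := range keys {
		out = append(out, Annotation{Name: k, Value: m[k]})
	}
	return out
}

func main() {
	resp := map[string]string{"com.example/a": "1", "com.example/b": "2"} // hypothetical keys
	fmt.Println(annotationsFromMap(resp)) // [{com.example/a 1} {com.example/b 2}]
}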
@@ -382,6 +382,11 @@ type EnvVar struct {
 	Value string
 }
 
+type Annotation struct {
+	Name  string
+	Value string
+}
+
 type Mount struct {
 	// Name of the volume mount.
 	// TODO(yifan): Remove this field, as this is not representing the unique name of the mount,
@@ -431,6 +436,10 @@ type RunContainerOptions struct {
 	Devices []DeviceInfo
 	// The port mappings for the containers.
 	PortMappings []PortMapping
+	// The annotations for the container
+	// These annotations are generated by other components (i.e.,
+	// not users). Currently, only device plugins populate the annotations.
+	Annotations []Annotation
 	// If the container has specified the TerminationMessagePath, then
 	// this directory will be used to create and mount the log file to
 	// container.TerminationMessagePath
@@ -202,7 +202,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container
 		Args:        args,
 		WorkingDir:  container.WorkingDir,
 		Labels:      newContainerLabels(container, pod, containerType),
-		Annotations: newContainerAnnotations(container, pod, restartCount),
+		Annotations: newContainerAnnotations(container, pod, restartCount, opts),
 		Devices:     makeDevices(opts),
 		Mounts:      m.makeMounts(opts, container),
 		LogPath:     containerLogsPath,
@@ -222,7 +222,7 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex
 		Args:        []string(nil),
 		WorkingDir:  container.WorkingDir,
 		Labels:      newContainerLabels(container, pod, kubecontainer.ContainerTypeRegular),
-		Annotations: newContainerAnnotations(container, pod, restartCount),
+		Annotations: newContainerAnnotations(container, pod, restartCount, opts),
 		Devices:     makeDevices(opts),
 		Mounts:      m.makeMounts(opts, container),
 		LogPath:     containerLogsPath,
@@ -111,8 +111,14 @@ func newContainerLabels(container *v1.Container, pod *v1.Pod, containerType kubecontainer.ContainerType
 }
 
 // newContainerAnnotations creates container annotations from v1.Container and v1.Pod.
-func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount int) map[string]string {
+func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount int, opts *kubecontainer.RunContainerOptions) map[string]string {
 	annotations := map[string]string{}
+
+	// Kubelet always overrides device plugin annotations if they are conflicting
+	for _, a := range opts.Annotations {
+		annotations[a.Name] = a.Value
+	}
+
 	annotations[containerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
 	annotations[containerRestartCountLabel] = strconv.Itoa(restartCount)
 	annotations[containerTerminationMessagePathLabel] = container.TerminationMessagePath
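Note the ordering inside newContainerAnnotations: the device plugin annotations are written into the map first, and the kubelet-owned keys (container hash, restart count, termination message path) are written afterwards, so on a key collision the kubelet value wins; that is what the in-code comment means by "always overrides". A standalone sketch of that precedence, where io.kubernetes.container.hash stands in for containerHashLabel and the values are illustrative only:

package main

import "fmt"

type Annotation struct{ Name, Value string }

// buildAnnotations mirrors the write order in newContainerAnnotations above:
// plugin annotations first, kubelet-owned keys last, so the kubelet wins.
func buildAnnotations(pluginAnnotations []Annotation) map[string]string {
	annotations := map[string]string{}
	for _, a := range pluginAnnotations {
		annotations[a.Name] = a.Value
	}
	// Stand-in for the kubelet-owned keys written after the loop.
	annotations["io.kubernetes.container.hash"] = "abc123" // illustrative value
	return annotations
}

func main() {
	plugin := []Annotation{
		{Name: "io.kubernetes.container.hash", Value: "bogus"}, // collides; overridden
		{Name: "com.example/hint", Value: "isolated"},          // hypothetical plugin key
	}
	fmt.Println(buildAnnotations(plugin))
	// map[com.example/hint:isolated io.kubernetes.container.hash:abc123]
}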
@@ -156,6 +156,11 @@ func TestContainerAnnotations(t *testing.T) {
 	restartCount := 5
 	deletionGracePeriod := int64(10)
 	terminationGracePeriod := int64(10)
+	opts := &kubecontainer.RunContainerOptions{
+		Annotations: []kubecontainer.Annotation{
+			{Name: "Foo", Value: "bar"},
+		},
+	}
 	lifecycle := &v1.Lifecycle{
 		// Left PostStart as nil
 		PreStop: &v1.Handler{
@@ -216,11 +221,14 @@ func TestContainerAnnotations(t *testing.T) {
 	}
 
 	// Test whether we can get right information from label
-	annotations := newContainerAnnotations(container, pod, restartCount)
+	annotations := newContainerAnnotations(container, pod, restartCount, opts)
 	containerInfo := getContainerInfoFromAnnotations(annotations)
 	if !reflect.DeepEqual(containerInfo, expected) {
 		t.Errorf("expected %v, got %v", expected, containerInfo)
 	}
+	if v, ok := annotations[opts.Annotations[0].Name]; !ok || v != opts.Annotations[0].Value {
+		t.Errorf("expected annotation %s to exist got %v, %v", opts.Annotations[0].Name, ok, v)
+	}
 
 	// Test when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil,
 	// the information got from annotations should also be nil
@@ -232,11 +240,14 @@ func TestContainerAnnotations(t *testing.T) {
 	expected.PreStopHandler = nil
 	// Because container is changed, the Hash should be updated
 	expected.Hash = kubecontainer.HashContainer(container)
-	annotations = newContainerAnnotations(container, pod, restartCount)
+	annotations = newContainerAnnotations(container, pod, restartCount, opts)
 	containerInfo = getContainerInfoFromAnnotations(annotations)
 	if !reflect.DeepEqual(containerInfo, expected) {
 		t.Errorf("expected %v, got %v", expected, containerInfo)
 	}
+	if v, ok := annotations[opts.Annotations[0].Name]; !ok || v != opts.Annotations[0].Value {
+		t.Errorf("expected annotation %s to exist got %v, %v", opts.Annotations[0].Name, ok, v)
+	}
 }
 
 func TestPodLabels(t *testing.T) {