mirror of https://github.com/k3s-io/k3s
Merge pull request #16414 from Random-Liu/put-podname-into-label
Auto commit by PR queue bot
commit eb140495ab

@@ -211,6 +211,7 @@ func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
 	return p.puller.IsImagePresent(name)
 }
 
+// TODO (random-liu) Almost never used, should we remove this?
 // DockerContainers is a map of containers
 type DockerContainers map[kubetypes.DockerID]*docker.APIContainers

@@ -146,13 +146,14 @@ func (f *FakeDockerClient) ListContainers(options docker.ListContainersOptions)
 	defer f.Unlock()
 	f.called = append(f.called, "list")
 	err := f.popError("list")
+	containerList := append([]docker.APIContainers{}, f.ContainerList...)
 	if options.All {
 		// Althought the container is not sorted, but the container with the same name should be in order,
 		// that is enough for us now.
 		// TODO (random-liu) Is a fully sorted array needed?
-		return append(f.ContainerList, f.ExitedContainerList...), err
+		containerList = append(containerList, f.ExitedContainerList...)
 	}
-	return append([]docker.APIContainers{}, f.ContainerList...), err
+	return containerList, err
 }
 
 // InspectContainer is a test-spy implementation of DockerInterface.InspectContainer.
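
The rework above builds the result in a local containerList slice copied from f.ContainerList, so listing with All appends exited containers to that copy rather than to the fake's own field. A minimal test sketch of the new behavior (the test name is hypothetical, and it assumes a FakeDockerClient populated with only the two list fields is usable, as in the existing kubelet tests):

func TestFakeListContainersSketch(t *testing.T) {
	fakeDocker := &FakeDockerClient{
		ContainerList:       []docker.APIContainers{{ID: "running"}},
		ExitedContainerList: []docker.APIContainers{{ID: "exited"}},
	}

	// Without All, only the running containers are returned.
	running, err := fakeDocker.ListContainers(docker.ListContainersOptions{})
	if err != nil || len(running) != 1 {
		t.Fatalf("expected 1 running container, got %v (err: %v)", running, err)
	}

	// With All, exited containers are appended to a copy, so the fake's
	// ContainerList field is left untouched by the call.
	all, err := fakeDocker.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil || len(all) != 2 {
		t.Fatalf("expected 2 containers, got %v (err: %v)", all, err)
	}
	if len(fakeDocker.ContainerList) != 1 {
		t.Fatalf("ContainerList was mutated: %v", fakeDocker.ContainerList)
	}
}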

@@ -0,0 +1,67 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dockertools
+
+import (
+	"strconv"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+)
+
+// This file contains all docker label related constants and functions, including:
+//  * label setters and getters
+//  * label filters (maybe in the future)
+
+const (
+	kubernetesPodNameLabel      = "io.kubernetes.pod.name"
+	kubernetesPodNamespaceLabel = "io.kubernetes.pod.namespace"
+	kubernetesPodUID            = "io.kubernetes.pod.uid"
+
+	kubernetesPodLabel                    = "io.kubernetes.pod.data"
+	kubernetesTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
+	kubernetesContainerLabel              = "io.kubernetes.container.name"
+	kubernetesContainerRestartCountLabel  = "io.kubernetes.container.restartCount"
+)
+
+func newLabels(container *api.Container, pod *api.Pod, restartCount int) map[string]string {
+	// TODO (random-liu) Move more label initialization here
+	labels := map[string]string{}
+	labels[kubernetesPodNameLabel] = pod.Name
+	labels[kubernetesPodNamespaceLabel] = pod.Namespace
+	labels[kubernetesPodUID] = string(pod.UID)
+
+	labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
+
+	return labels
+}
+
+func getRestartCountFromLabel(labels map[string]string) (restartCount int, err error) {
+	if restartCountString, found := labels[kubernetesContainerRestartCountLabel]; found {
+		restartCount, err = strconv.Atoi(restartCountString)
+		if err != nil {
+			// This really should not happen. Just set restartCount to 0 to handle this abnormal case
+			restartCount = 0
+		}
+	} else {
+		// Get restartCount from docker label. If there is no restart count label in a container,
+		// it should be an old container or an invalid container, we just set restart count to 0.
+		// Do not report error, because there should be many old containers without this label now
+		glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", kubernetesContainerRestartCountLabel)
+	}
+	return restartCount, err
+}
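
The two helpers in the new file are meant to round-trip: newLabels is written onto the docker container at creation time (see runContainer below) and getRestartCountFromLabel reads the count back from an inspected container's labels. A small illustrative sketch (the function name is made up; it assumes the code lives in package dockertools so the unexported helpers are visible):

func exampleLabelRoundTrip() {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{UID: "12345678", Name: "foo", Namespace: "new"},
	}
	container := &api.Container{Name: "bar"}

	// Written onto the docker container when it is created.
	labels := newLabels(container, pod, 3)
	// labels["io.kubernetes.pod.name"] == "foo"
	// labels["io.kubernetes.pod.namespace"] == "new"
	// labels["io.kubernetes.container.restartCount"] == "3"

	// Read back later, e.g. from inspectResult.Config.Labels.
	restartCount, err := getRestartCountFromLabel(labels)
	if err != nil {
		// Only possible when the label value is not an integer;
		// the helper then falls back to restartCount = 0.
	}
	_ = restartCount // 3 in this sketch
}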

@@ -69,12 +69,6 @@ const (
 	// SIGTERM for certain process types, which may justify setting this to 0.
 	minimumGracePeriodInSeconds = 2
 
-	kubernetesNameLabel                   = "io.kubernetes.pod.name"
-	kubernetesPodLabel                    = "io.kubernetes.pod.data"
-	kubernetesTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
-	kubernetesContainerLabel              = "io.kubernetes.container.name"
-	kubernetesContainerRestartCountLabel  = "io.kubernetes.container.restartCount"
-
 	DockerNetnsFmt = "/proc/%v/ns/net"
 )

@@ -359,18 +353,9 @@ func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string,
 	glog.V(4).Infof("Container inspect result: %+v", *inspectResult)
 
-	// Get restartCount from docker label, and add into the result.
-	// If there is no restart count label in an container:
-	// 1. It is an infraContainer, it will never use restart count.
-	// 2. It is an old container or an invalid container, we just set restart count to 0 now.
 	var restartCount int
-	if restartCountString, found := inspectResult.Config.Labels[kubernetesContainerRestartCountLabel]; found {
-		restartCount, err = strconv.Atoi(restartCountString)
-		if err != nil {
-			glog.Errorf("Error parsing restart count string %s for container %s: %v,", restartCountString, dockerID, err)
-			// Just set restartCount to 0 to handle this abnormal case
-			restartCount = 0
-		}
+	if restartCount, err = getRestartCountFromLabel(inspectResult.Config.Labels); err != nil {
+		glog.Errorf("Get restart count error for container %v: %v", dockerID, err)
 	}
 
 	result.status = api.ContainerStatus{
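
With this change the parsing and fallback logic lives in getRestartCountFromLabel; inspectContainer only logs the error. For illustration, the error path above behaves roughly like this (hypothetical snippet, assuming package dockertools):

func exampleMalformedRestartCount() {
	count, err := getRestartCountFromLabel(map[string]string{
		kubernetesContainerRestartCountLabel: "not-a-number",
	})
	// err carries the strconv.Atoi failure and count falls back to 0, so
	// inspectContainer reports the error but still produces a status with
	// RestartCount 0.
	_, _ = count, err
}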

@@ -461,6 +446,9 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) {
 	}
 	expectedContainers[PodInfraContainerName] = api.Container{}
 
+	// We have added labels like pod name and pod namespace, it seems that we can do filtered list here.
+	// However, there may be some old containers without these labels, so at least now we can't do that.
+	// TODO (random-liu) Add filter when we are sure that all the containers have the labels
 	containers, err := dm.client.ListContainers(docker.ListContainersOptions{All: true})
 	if err != nil {
 		return nil, err
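
Once every container is guaranteed to carry the pod name and namespace labels, the TODO above could be resolved by asking docker to filter server-side instead of listing everything. A speculative sketch of that future call, assuming go-dockerclient's ListContainersOptions.Filters field and a docker daemon new enough to support the "label" filter (the helper name is made up):

func listPodContainers(client DockerInterface, pod *api.Pod) ([]docker.APIContainers, error) {
	return client.ListContainers(docker.ListContainersOptions{
		All: true,
		Filters: map[string][]string{
			"label": {
				kubernetesPodNameLabel + "=" + pod.Name,
				kubernetesPodNamespaceLabel + "=" + pod.Namespace,
			},
		},
	})
}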

@@ -471,7 +459,6 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) {
 	// the statuses. We assume docker returns a list of containers sorted in
 	// reverse by time.
 	for _, value := range containers {
-		// TODO (random-liu) Filter by docker label later
 		if len(value.Names) == 0 {
 			continue
 		}

@@ -688,7 +675,7 @@ func (dm *DockerManager) runContainer(
 	ipcMode string,
 	utsMode string,
 	pidMode string,
-	labels map[string]string) (kubecontainer.ContainerID, error) {
+	restartCount int) (kubecontainer.ContainerID, error) {
 
 	dockerName := KubeletContainerName{
 		PodFullName: kubecontainer.GetPodFullName(pod),

@@ -709,12 +696,8 @@ func (dm *DockerManager) runContainer(
 	// while the Kubelet is down and there is no information available to recover the pod. This includes
 	// termination information like the termination grace period and the pre stop hooks.
 	// TODO: keep these labels up to date if the pod changes
-	namespacedName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}
-	// Just in case. If there is no label, just pass nil. An empty map will be created here.
-	if labels == nil {
-		labels = map[string]string{}
-	}
-	labels[kubernetesNameLabel] = namespacedName.String()
+	labels := newLabels(container, pod, restartCount)
 	if pod.Spec.TerminationGracePeriodSeconds != nil {
 		labels[kubernetesTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
 	}

@@ -1502,8 +1485,7 @@ func containerAndPodFromLabels(inspect *docker.Container) (pod *api.Pod, contain
 
 // Run a single container from a pod. Returns the docker container ID
 // If do not need to pass labels, just pass nil.
-// TODO (random-liu) Just add labels directly now, maybe should make some abstraction.
-func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode, pidMode string, labels map[string]string) (kubecontainer.ContainerID, error) {
+func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode, pidMode string, restartCount int) (kubecontainer.ContainerID, error) {
 	start := time.Now()
 	defer func() {
 		metrics.ContainerManagerLatency.WithLabelValues("runContainerInPod").Observe(metrics.SinceInMicroseconds(start))

@@ -1523,7 +1505,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
 	if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork {
 		utsMode = "host"
 	}
-	id, err := dm.runContainer(pod, container, opts, ref, netMode, ipcMode, utsMode, pidMode, labels)
+	id, err := dm.runContainer(pod, container, opts, ref, netMode, ipcMode, utsMode, pidMode, restartCount)
 	if err != nil {
 		return kubecontainer.ContainerID{}, err
 	}

@@ -1660,8 +1642,8 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubetypes.Docker
 		return "", err
 	}
 
-	// There is no meaningful labels for infraContainer now, so just pass nil.
-	id, err := dm.runContainerInPod(pod, container, netNamespace, getIPCMode(pod), getPidMode(pod), nil)
+	// Currently we don't care about restart count of infra container, just set it to 0.
+	id, err := dm.runContainerInPod(pod, container, netNamespace, getIPCMode(pod), getPidMode(pod), 0)
 	if err != nil {
 		return "", err
 	}

@@ -1922,17 +1904,16 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
 			}
 		}
 
-		labels := map[string]string{}
 		containerStatuses := podStatus.ContainerStatuses
 		// podStatus is generated by GetPodStatus(). In GetPodStatus(), we make sure that ContainerStatuses
 		// contains statuses of all containers in pod.Spec.Containers.
 		// ContainerToStart is a subset of pod.Spec.Containers, we should always find a result here.
 		// For a new container, the RestartCount should be 0
-		labels[kubernetesContainerRestartCountLabel] = "0"
+		restartCount := 0
 		for _, containerStatus := range containerStatuses {
 			// If the container's terminate state is not empty, it exited before. Increment the restart count.
 			if containerStatus.Name == container.Name && (containerStatus.State.Terminated != nil || containerStatus.LastTerminationState.Terminated != nil) {
-				labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(containerStatus.RestartCount + 1)
+				restartCount = containerStatus.RestartCount + 1
 				break
 			}
 		}
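
The hunk above replaces the string-typed restart-count label with a plain integer that is only turned into a label inside newLabels. The rule it implements can be read as a small helper (hypothetical, not part of the commit):

// restartCountForContainer returns the restart count to record for a container
// that is about to be (re)started: count+1 if it has terminated before, else 0.
func restartCountForContainer(container *api.Container, statuses []api.ContainerStatus) int {
	for _, status := range statuses {
		if status.Name == container.Name &&
			(status.State.Terminated != nil || status.LastTerminationState.Terminated != nil) {
			return status.RestartCount + 1
		}
	}
	return 0
}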

@@ -1943,7 +1924,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
 		// and IPC namespace. PID mode cannot point to another container right now.
 		// See createPodInfraContainer for infra container setup.
 		namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID)
-		_, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, getPidMode(pod), labels)
+		_, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, getPidMode(pod), restartCount)
 		dm.updateReasonCache(pod, container, kubecontainer.ErrRunContainer.Error(), err)
 		if err != nil {
 			// TODO(bburns) : Perhaps blacklist a container after N failures?

@@ -1168,6 +1168,17 @@ func TestGetPodStatusWithLastTermination(t *testing.T) {
 		{Name: "failed"},
 	}
+
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			UID:       "12345678",
+			Name:      "foo",
+			Namespace: "new",
+		},
+		Spec: api.PodSpec{
+			Containers: containers,
+		},
+	}
 
 	exitedAPIContainers := []docker.APIContainers{
 		{
 			// format is // k8s_<container-id>_<pod-fullname>_<pod-uid>

@@ -1247,17 +1258,7 @@ func TestGetPodStatusWithLastTermination(t *testing.T) {
 		fakeDocker.ExitedContainerList = exitedAPIContainers
 		fakeDocker.ContainerMap = containerMap
 		fakeDocker.ClearCalls()
-		pod := &api.Pod{
-			ObjectMeta: api.ObjectMeta{
-				UID:       "12345678",
-				Name:      "foo",
-				Namespace: "new",
-			},
-			Spec: api.PodSpec{
-				Containers:    containers,
-				RestartPolicy: tt.policy,
-			},
-		}
+		pod.Spec.RestartPolicy = tt.policy
 		fakeDocker.ContainerList = []docker.APIContainers{
 			{
 				// pod infra container