mirror of https://github.com/k3s-io/k3s

Replace podFullName with format.Pod() in logging messages

parent fda73c04ec
commit c646255579
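For context on the change below: format.Pod (from k8s.io/kubernetes/pkg/kubelet/util/format) renders a pod for log messages as a single string that includes the UID, whereas kubecontainer.GetPodFullName yields only the name/namespace pair. A minimal sketch of the two helpers and their output, assuming the conventional name_namespace(uid) layout; the real implementations may differ in detail:

package main

import "fmt"

type pod struct {
	Name, Namespace, UID string
}

// formatPod mirrors what format.Pod is assumed to produce: a log-friendly
// "name_namespace(uid)" identifier.
func formatPod(p *pod) string {
	return fmt.Sprintf("%s_%s(%s)", p.Name, p.Namespace, p.UID)
}

// getPodFullName mirrors kubecontainer.GetPodFullName, assumed to return just
// "name_namespace"; it remains useful where a stable string key or file name
// is needed rather than a log message.
func getPodFullName(p *pod) string {
	return fmt.Sprintf("%s_%s", p.Name, p.Namespace)
}

func main() {
	p := &pod{Name: "nginx", Namespace: "default", UID: "1234-abcd"}
	fmt.Println(formatPod(p))      // nginx_default(1234-abcd)
	fmt.Println(getPodFullName(p)) // nginx_default
}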
@@ -23,6 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/third_party/golang/expansion"
@@ -44,8 +45,6 @@ type RunContainerOptionsGenerator interface {
 // ShouldContainerBeRestarted checks whether a container needs to be restarted.
 // TODO(yifan): Think about how to refactor this.
 func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatus *PodStatus) bool {
-	podFullName := GetPodFullName(pod)
-
 	// Get all dead container status.
 	var resultStatus []*ContainerStatus
 	for _, containerStatus := range podStatus.ContainerStatuses {
@@ -57,14 +56,14 @@ func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatu
 	// Check RestartPolicy for dead container.
 	if len(resultStatus) > 0 {
 		if pod.Spec.RestartPolicy == api.RestartPolicyNever {
-			glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, podFullName)
+			glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
 			return false
 		}
 		if pod.Spec.RestartPolicy == api.RestartPolicyOnFailure {
 			// Check the exit code of last run. Note: This assumes the result is sorted
 			// by the created time in reverse order.
 			if resultStatus[0].ExitCode == 0 {
-				glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, podFullName)
+				glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
 				return false
 			}
 		}
@@ -74,8 +73,6 @@ func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatu
 
 // TODO (random-liu) This should be removed soon after rkt implements GetPodStatus.
 func ShouldContainerBeRestartedOldVersion(container *api.Container, pod *api.Pod, podStatus *api.PodStatus) bool {
-	podFullName := GetPodFullName(pod)
-
 	// Get all dead container status.
 	var resultStatus []*api.ContainerStatus
 	for i, containerStatus := range podStatus.ContainerStatuses {
@@ -87,14 +84,14 @@ func ShouldContainerBeRestartedOldVersion(container *api.Container, pod *api.Pod
 	// Check RestartPolicy for dead container.
 	if len(resultStatus) > 0 {
 		if pod.Spec.RestartPolicy == api.RestartPolicyNever {
-			glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, podFullName)
+			glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
 			return false
 		}
 		if pod.Spec.RestartPolicy == api.RestartPolicyOnFailure {
 			// Check the exit code of last run. Note: This assumes the result is sorted
 			// by the created time in reverse order.
 			if resultStatus[0].State.Terminated.ExitCode == 0 {
-				glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, podFullName)
+				glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
 				return false
 			}
 		}
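Both ShouldContainerBeRestarted variants above apply the same restart decision to a pod's dead containers. For illustration, a self-contained sketch of that decision with simplified stand-in types (the field and type names below are not the real Kubernetes API):

package main

import "fmt"

type RestartPolicy string

const (
	RestartPolicyAlways    RestartPolicy = "Always"
	RestartPolicyOnFailure RestartPolicy = "OnFailure"
	RestartPolicyNever     RestartPolicy = "Never"
)

type deadStatus struct{ ExitCode int }

// shouldRestart reports whether a container with the given dead-container
// history should be started again under the given policy. deadStatuses is
// assumed to be sorted newest first, as the comment in the diff states.
func shouldRestart(policy RestartPolicy, deadStatuses []deadStatus) bool {
	if len(deadStatuses) > 0 {
		if policy == RestartPolicyNever {
			return false // already ran once; never restart
		}
		if policy == RestartPolicyOnFailure && deadStatuses[0].ExitCode == 0 {
			return false // last run succeeded; nothing to retry
		}
	}
	return true
}

func main() {
	fmt.Println(shouldRestart(RestartPolicyOnFailure, []deadStatus{{ExitCode: 0}})) // false
	fmt.Println(shouldRestart(RestartPolicyOnFailure, []deadStatus{{ExitCode: 1}})) // true
	fmt.Println(shouldRestart(RestartPolicyNever, []deadStatus{{ExitCode: 1}}))     // false
	fmt.Println(shouldRestart(RestartPolicyAlways, nil))                            // true
}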
@@ -47,6 +47,7 @@ import (
 	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
@@ -1484,11 +1485,10 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
 	// full pod name, the container name and the Docker container ID. Cluster level logging will
 	// capture these symbolic filenames which can be used for search terms in Elasticsearch or for
 	// labels for Cloud Logging.
-	podFullName := kubecontainer.GetPodFullName(pod)
 	containerLogFile := path.Join(dm.dockerRoot, "containers", id.ID, fmt.Sprintf("%s-json.log", id.ID))
-	symlinkFile := LogSymlink(dm.containerLogsDir, podFullName, container.Name, id.ID)
+	symlinkFile := LogSymlink(dm.containerLogsDir, kubecontainer.GetPodFullName(pod), container.Name, id.ID)
 	if err = dm.os.Symlink(containerLogFile, symlinkFile); err != nil {
-		glog.Errorf("Failed to create symbolic link to the log file of pod %q container %q: %v", podFullName, container.Name, err)
+		glog.Errorf("Failed to create symbolic link to the log file of pod %q container %q: %v", format.Pod(pod), container.Name, err)
 	}

 	// Container information is used in adjusting OOM scores and adding ndots.
@@ -1632,10 +1632,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
 	defer func() {
 		metrics.ContainerManagerLatency.WithLabelValues("computePodContainerChanges").Observe(metrics.SinceInMicroseconds(start))
 	}()
-
-	podFullName := kubecontainer.GetPodFullName(pod)
-	uid := pod.UID
-	glog.V(4).Infof("Syncing Pod %+v, podFullName: %q, uid: %q", pod, podFullName, uid)
+	glog.V(4).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)

 	containersToStart := make(map[int]string)
 	containersToKeep := make(map[kubetypes.DockerID]int)
@@ -1645,7 +1642,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
 	var changed bool
 	podInfraContainerStatus := podStatus.FindContainerStatusByName(PodInfraContainerName)
 	if podInfraContainerStatus != nil && podInfraContainerStatus.State == kubecontainer.ContainerStateRunning {
-		glog.V(4).Infof("Found pod infra container for %q", podFullName)
+		glog.V(4).Infof("Found pod infra container for %q", format.Pod(pod))
 		changed, err = dm.podInfraContainerChanged(pod, podInfraContainerStatus)
 		if err != nil {
 			return PodContainerChangesSpec{}, err
@@ -1654,11 +1651,11 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub

 	createPodInfraContainer := true
 	if podInfraContainerStatus == nil || podInfraContainerStatus.State != kubecontainer.ContainerStateRunning {
-		glog.V(2).Infof("Need to restart pod infra container for %q because it is not found", podFullName)
+		glog.V(2).Infof("Need to restart pod infra container for %q because it is not found", format.Pod(pod))
 	} else if changed {
-		glog.V(2).Infof("Need to restart pod infra container for %q because it is changed", podFullName)
+		glog.V(2).Infof("Need to restart pod infra container for %q because it is changed", format.Pod(pod))
 	} else {
-		glog.V(4).Infof("Pod infra container looks good, keep it %q", podFullName)
+		glog.V(4).Infof("Pod infra container looks good, keep it %q", format.Pod(pod))
 		createPodInfraContainer = false
 		podInfraContainerID = kubetypes.DockerID(podInfraContainerStatus.ID.ID)
 		containersToKeep[podInfraContainerID] = -1
@@ -1682,7 +1679,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub

 		containerID := kubetypes.DockerID(containerStatus.ID.ID)
 		hash := containerStatus.Hash
-		glog.V(3).Infof("pod %q container %q exists as %v", podFullName, container.Name, containerID)
+		glog.V(3).Infof("pod %q container %q exists as %v", format.Pod(pod), container.Name, containerID)

 		if createPodInfraContainer {
 			// createPodInfraContainer == true and Container exists
@@ -1701,7 +1698,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
 		// We will look for changes and check healthiness for the container.
 		containerChanged := hash != 0 && hash != expectedHash
 		if containerChanged {
-			message := fmt.Sprintf("pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, hash, expectedHash)
+			message := fmt.Sprintf("pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", format.Pod(pod), container.Name, hash, expectedHash)
 			glog.Info(message)
 			containersToStart[index] = message
 			continue
@@ -1713,7 +1710,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
 			continue
 		}
 		if pod.Spec.RestartPolicy != api.RestartPolicyNever {
-			message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", podFullName, container.Name)
+			message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
 			glog.Info(message)
 			containersToStart[index] = message
 		}
@@ -1759,26 +1756,24 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ kubecontainer.Pod, _ api.PodSta
 		metrics.ContainerManagerLatency.WithLabelValues("SyncPod").Observe(metrics.SinceInMicroseconds(start))
 	}()

-	podFullName := kubecontainer.GetPodFullName(pod)
-
 	containerChanges, err := dm.computePodContainerChanges(pod, podStatus)
 	if err != nil {
 		return err
 	}
-	glog.V(3).Infof("Got container changes for pod %q: %+v", podFullName, containerChanges)
+	glog.V(3).Infof("Got container changes for pod %q: %+v", format.Pod(pod), containerChanges)

 	if containerChanges.InfraChanged {
 		ref, err := api.GetReference(pod)
 		if err != nil {
-			glog.Errorf("Couldn't make a ref to pod %q: '%v'", podFullName, err)
+			glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
 		}
 		dm.recorder.Eventf(ref, api.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
 	}
 	if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) {
 		if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 {
-			glog.V(4).Infof("Killing Infra Container for %q because all other containers are dead.", podFullName)
+			glog.V(4).Infof("Killing Infra Container for %q because all other containers are dead.", format.Pod(pod))
 		} else {
-			glog.V(4).Infof("Killing Infra Container for %q, will start new one", podFullName)
+			glog.V(4).Infof("Killing Infra Container for %q, will start new one", format.Pod(pod))
 		}

 		// Killing phase: if we want to start new infra container, or nothing is running kill everything (including infra container)
@@ -1815,23 +1810,23 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ kubecontainer.Pod, _ api.PodSta
 	// If we should create infra container then we do it first.
 	podInfraContainerID := containerChanges.InfraContainerId
 	if containerChanges.StartInfraContainer && (len(containerChanges.ContainersToStart) > 0) {
-		glog.V(4).Infof("Creating pod infra container for %q", podFullName)
+		glog.V(4).Infof("Creating pod infra container for %q", format.Pod(pod))
 		podInfraContainerID, err = dm.createPodInfraContainer(pod)
 		if err != nil {
-			glog.Errorf("Failed to create pod infra container: %v; Skipping pod %q", err, podFullName)
+			glog.Errorf("Failed to create pod infra container: %v; Skipping pod %q", err, format.Pod(pod))
 			return err
 		}

 		// Call the networking plugin
 		err = dm.networkPlugin.SetUpPod(pod.Namespace, pod.Name, podInfraContainerID)
 		if err != nil {
-			message := fmt.Sprintf("Failed to setup networking for pod %q using network plugins: %v; Skipping pod", podFullName, err)
+			message := fmt.Sprintf("Failed to setup networking for pod %q using network plugins: %v; Skipping pod", format.Pod(pod), err)
 			glog.Error(message)
 			// Delete infra container
 			if delErr := dm.KillContainerInPod(kubecontainer.ContainerID{
 				ID: string(podInfraContainerID),
 				Type: "docker"}, nil, pod, message); delErr != nil {
-				glog.Warningf("Clear infra container failed for pod %q: %v", podFullName, delErr)
+				glog.Warningf("Clear infra container failed for pod %q: %v", format.Pod(pod), delErr)
 			}
 			return err
 		}
@@ -1839,12 +1834,12 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ kubecontainer.Pod, _ api.PodSta
 		// Setup the host interface unless the pod is on the host's network (FIXME: move to networkPlugin when ready)
 		podInfraContainer, err := dm.client.InspectContainer(string(podInfraContainerID))
 		if err != nil {
-			glog.Errorf("Failed to inspect pod infra container: %v; Skipping pod %q", err, podFullName)
+			glog.Errorf("Failed to inspect pod infra container: %v; Skipping pod %q", err, format.Pod(pod))
 			return err
 		}
 		if !(pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork) {
 			if err = hairpin.SetUpContainer(podInfraContainer.State.Pid, "eth0"); err != nil {
-				glog.Warningf("Hairpin setup failed for pod %q: %v", podFullName, err)
+				glog.Warningf("Hairpin setup failed for pod %q: %v", format.Pod(pod), err)
 			}
 		}

@@ -1861,10 +1856,10 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ kubecontainer.Pod, _ api.PodSta
 		// containerChanges.StartInfraContainer causes the containers to be restarted for config reasons
 		// ignore backoff
 		if !containerChanges.StartInfraContainer && dm.doBackOff(pod, container, podStatus, backOff) {
-			glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, podFullName)
+			glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
 			continue
 		}
-		glog.V(4).Infof("Creating container %+v in pod %v", container, podFullName)
+		glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
 		err, msg := dm.imagePuller.PullImage(pod, container, pullSecrets)
 		if err != nil {
 			dm.updateReasonCache(pod, container, err.Error(), errors.New(msg))
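Note that in the DockerManager.runContainerInPod hunk above the log symlink keeps using kubecontainer.GetPodFullName, and only the error message switches to format.Pod: the full name becomes part of a file path, where the plain name_namespace form is the safer choice. A hedged sketch of how that symlink name is presumably assembled (the LogSymlink format string below is an assumption; only its call site appears in this commit):

package main

import (
	"fmt"
	"path"
)

func getPodFullName(name, namespace string) string {
	return name + "_" + namespace
}

// logSymlink stands in for dockertools.LogSymlink: a stable, filesystem-safe
// name built from the pod full name, container name and Docker container ID.
func logSymlink(containerLogsDir, podFullName, containerName, dockerID string) string {
	return path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s.log", podFullName, containerName, dockerID))
}

func main() {
	full := getPodFullName("nginx", "default")
	fmt.Println(logSymlink("/var/log/containers", full, "web", "0123abcd"))
	// /var/log/containers/nginx_default_web-0123abcd.log
}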
@@ -1583,8 +1583,6 @@ func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error {
 }

 func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType kubetypes.SyncPodType) (syncErr error) {
-	podFullName := kubecontainer.GetPodFullName(pod)
-	uid := pod.UID
 	start := time.Now()
 	var firstSeenTime time.Time
 	if firstSeenTimeStr, ok := pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey]; !ok {
@@ -1597,7 +1595,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	defer func() {
 		status, err := kl.generatePodStatus(pod)
 		if err != nil {
-			glog.Errorf("Unable to generate status for pod with name %q and uid %q info with error(%v)", podFullName, uid, err)
+			glog.Errorf("Unable to generate status for pod %q with error(%v)", format.Pod(pod), err)
 			// Propagate the error upstream.
 			syncErr = err
 		} else {
@@ -1620,18 +1618,19 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont

 	// Create Mirror Pod for Static Pod if it doesn't already exist
 	if kubepod.IsStaticPod(pod) {
+		podFullName := kubecontainer.GetPodFullName(pod)
 		if mirrorPod != nil && !kl.podManager.IsMirrorPodOf(mirrorPod, pod) {
 			// The mirror pod is semantically different from the static pod. Remove
 			// it. The mirror pod will get recreated later.
-			glog.Errorf("Deleting mirror pod %q because it is outdated", podFullName)
+			glog.Errorf("Deleting mirror pod %q because it is outdated", format.Pod(mirrorPod))
 			if err := kl.podManager.DeleteMirrorPod(podFullName); err != nil {
-				glog.Errorf("Failed deleting mirror pod %q: %v", podFullName, err)
+				glog.Errorf("Failed deleting mirror pod %q: %v", format.Pod(mirrorPod), err)
 			}
 		}
 		if mirrorPod == nil {
-			glog.V(3).Infof("Creating a mirror pod for static pod %q", podFullName)
+			glog.V(3).Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
 			if err := kl.podManager.CreateMirrorPod(pod); err != nil {
-				glog.Errorf("Failed creating a mirror pod %q: %v", podFullName, err)
+				glog.Errorf("Failed creating a mirror pod for %q: %v", format.Pod(pod), err)
 			}
 		}

 		_, ok := kl.podManager.GetMirrorPodByPod(pod)
@@ -1642,7 +1641,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	}

 	if err := kl.makePodDataDirs(pod); err != nil {
-		glog.Errorf("Unable to make pod data directories for pod %q (uid %q): %v", podFullName, uid, err)
+		glog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err)
 		return err
 	}

@@ -1651,8 +1650,8 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	if err != nil {
 		ref, errGetRef := api.GetReference(pod)
 		if errGetRef == nil && ref != nil {
-			kl.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", podFullName, err)
-			glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err)
+			kl.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
+			glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
 			return err
 		}
 	}
@@ -1694,11 +1693,11 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 			Name: pod.Name,
 			Namespace: pod.Namespace,
 		}
-		glog.V(3).Infof("Not generating pod status for new pod %q", podFullName)
+		glog.V(3).Infof("Not generating pod status for new pod %q", format.Pod(pod))
 	} else {
 		podStatusPtr, apiPodStatusPtr, err := kl.containerRuntime.GetPodStatusAndAPIPodStatus(pod)
 		if err != nil {
-			glog.Errorf("Unable to get status for pod %q (uid %q): %v", podFullName, uid, err)
+			glog.Errorf("Unable to get status for pod %q: %v", format.Pod(pod), err)
 			return err
 		}
 		apiPodStatus = *apiPodStatusPtr
@@ -1707,7 +1706,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont

 	pullSecrets, err := kl.getPullSecretsForPod(pod)
 	if err != nil {
-		glog.Errorf("Unable to get pull secrets for pod %q (uid %q): %v", podFullName, uid, err)
+		glog.Errorf("Unable to get pull secrets for pod %q: %v", format.Pod(pod), err)
 		return err
 	}

@@ -1900,8 +1899,7 @@ func (kl *Kubelet) cleanupTerminatedPods(pods []*api.Pod, runningPods []*kubecon
 			}
 		}
 		if found {
-			podFullName := kubecontainer.GetPodFullName(pod)
-			glog.V(5).Infof("Keeping terminated pod %q and uid %q, still running", podFullName, pod.UID)
+			glog.V(5).Infof("Keeping terminated pod %q, still running", format.Pod(pod))
 			continue
 		}
 		terminating = append(terminating, pod)
@@ -3116,8 +3114,7 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
 		metrics.PodStatusLatency.Observe(metrics.SinceInMicroseconds(start))
 	}()

-	podFullName := kubecontainer.GetPodFullName(pod)
-	glog.V(3).Infof("Generating status for %q", podFullName)
+	glog.V(3).Infof("Generating status for %q", format.Pod(pod))

 	// TODO: Consider include the container information.
 	if kl.pastActiveDeadline(pod) {
@@ -3134,7 +3131,7 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {

 	if err != nil {
 		// Error handling
-		glog.Infof("Query container info for pod %q failed with error (%v)", podFullName, err)
+		glog.Infof("Query container info for pod %q failed with error (%v)", format.Pod(pod), err)
 		if strings.Contains(err.Error(), "resource temporarily unavailable") {
 			// Leave upstream layer to decide what to do
 			return api.PodStatus{}, err
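In the Kubelet.syncPod hunks above, podFullName does not disappear entirely: it is recomputed inside the static-pod branch because the mirror-pod manager is keyed by that string, while the surrounding log lines switch to format.Pod. A small sketch of that split, with stand-in types and an assumed DeleteMirrorPod signature:

package main

import "fmt"

type pod struct{ Name, Namespace, UID string }

func getPodFullName(p *pod) string { return p.Name + "_" + p.Namespace }
func formatPod(p *pod) string      { return fmt.Sprintf("%s_%s(%s)", p.Name, p.Namespace, p.UID) }

// deleteMirrorPod stands in for podManager.DeleteMirrorPod; its string
// parameter (the bare full name) is an assumption based on the call site.
func deleteMirrorPod(podFullName string) error {
	fmt.Println("deleting mirror pod keyed by", podFullName)
	return nil
}

func syncStaticPod(p *pod, mirrorOutdated bool) {
	podFullName := getPodFullName(p) // needed only for the keyed call below
	if mirrorOutdated {
		// Log lines use the richer format.Pod-style identifier...
		fmt.Printf("Deleting mirror pod %q because it is outdated\n", formatPod(p))
		// ...while the manager call still takes the bare full name.
		if err := deleteMirrorPod(podFullName); err != nil {
			fmt.Printf("Failed deleting mirror pod %q: %v\n", formatPod(p), err)
		}
	}
}

func main() {
	syncStaticPod(&pod{Name: "nginx", Namespace: "default", UID: "1234-abcd"}, true)
}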
@@ -979,8 +979,6 @@ func (r *Runtime) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {

 // SyncPod syncs the running pod to match the specified desired pod.
 func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, _ *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
-	podFullName := format.Pod(pod)
-
 	// Add references to all containers.
 	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
 	for _, c := range runningPod.Containers {
@@ -1008,14 +1006,14 @@ func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus
 		// TODO(yifan): Take care of host network change.
 		containerChanged := c.Hash != 0 && c.Hash != expectedHash
 		if containerChanged {
-			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, c.Hash, expectedHash)
+			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", format.Pod(pod), container.Name, c.Hash, expectedHash)
 			restartPod = true
 			break
 		}

 		liveness, found := r.livenessManager.Get(c.ID)
 		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
-			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", podFullName, container.Name)
+			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
 			restartPod = true
 			break
 		}
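The SyncPod hunk above (apparently the rkt runtime, given the *Runtime receiver) folds per-container checks into a single whole-pod decision: a changed container hash, or a failed liveness probe when the restart policy is not Never, restarts the entire pod. A compact sketch of that decision with simplified stand-in types:

package main

import "fmt"

type container struct {
	Name            string
	Hash            uint64 // hash recorded when the container was started
	LivenessHealthy bool   // stand-in for the prober result
}

// needsRestart reports whether the whole pod should be restarted, mirroring
// the loop in the diff: a changed hash, or a failed liveness probe (unless
// the restart policy is Never), restarts the pod.
func needsRestart(containers []container, expectedHash map[string]uint64, restartPolicyNever bool) bool {
	for _, c := range containers {
		if c.Hash != 0 && c.Hash != expectedHash[c.Name] {
			return true // spec changed; kill and re-create
		}
		if !c.LivenessHealthy && !restartPolicyNever {
			return true // unhealthy; kill and re-create
		}
	}
	return false
}

func main() {
	expected := map[string]uint64{"web": 42}
	fmt.Println(needsRestart([]container{{Name: "web", Hash: 41, LivenessHealthy: true}}, expected, false)) // true
	fmt.Println(needsRestart([]container{{Name: "web", Hash: 42, LivenessHealthy: true}}, expected, false)) // false
}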
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/container"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 )

 const (
@@ -109,10 +110,9 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {
 		}
 		glog.Infof("pod %q containers not running: syncing", pod.Name)

-		podFullName := kubecontainer.GetPodFullName(pod)
-		glog.Infof("Creating a mirror pod for static pod %q", podFullName)
+		glog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
 		if err := kl.podManager.CreateMirrorPod(pod); err != nil {
-			glog.Errorf("Failed creating a mirror pod %q: %v", podFullName, err)
+			glog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err)
 		}
 		mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)