mirror of https://github.com/k3s-io/k3s
Kubelet: remove the getPodStatus method
Pod statuses are periodically written to the status manager, and the status manager sets the start time of the pod. All non-status-modifying code should perform cache lookups and should not attempt to generate pod status on its own.
parent 9298638658
commit 25668ccc11
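For context, the convention this commit standardizes on can be sketched in a few lines of Go. This is an illustration, not the kubelet's actual code: PodStatus, statusManager, and the pod name "mypod_default" below are simplified stand-ins for the real pkg/kubelet types, shown only to make the cache-lookup convention concrete (value plus found flag, no fallback to generating status from the container runtime).

package main

import "fmt"

// PodStatus is a simplified stand-in for api.PodStatus.
type PodStatus struct {
	Phase string
}

// statusManager is a simplified stand-in for the kubelet's status manager:
// pod workers write the latest status here on every sync, and all other
// code performs read-only cache lookups.
type statusManager struct {
	statuses map[string]PodStatus
}

// GetPodStatus is a pure cache lookup. Unlike the removed
// Kubelet.getPodStatus, it never falls back to generating a status
// from the container runtime.
func (s *statusManager) GetPodStatus(podFullName string) (PodStatus, bool) {
	status, found := s.statuses[podFullName]
	return status, found
}

func main() {
	// "mypod_default" is a hypothetical pod full name for illustration.
	sm := &statusManager{statuses: map[string]PodStatus{
		"mypod_default": {Phase: "Running"},
	}}
	// After this commit, callers treat a cache miss as "status not
	// available yet" rather than computing a status themselves.
	if status, found := sm.GetPodStatus("mypod_default"); found {
		fmt.Println("phase:", status.Phase)
	} else {
		fmt.Println("status not cached yet; assume the pod is not ready")
	}
}

This matches the commit message: pod workers are the only writers to the cache, and every reader tolerates a miss instead of querying the container runtime on its own.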
@@ -1587,9 +1587,12 @@ func (kl *Kubelet) validateContainerStatus(podStatus *api.PodStatus, containerNa
 // or all of them.
 func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName, tail string, follow, previous bool, stdout, stderr io.Writer) error {
 	// TODO(vmarmol): Refactor to not need the pod status and verification.
-	podStatus, err := kl.getPodStatus(podFullName)
-	if err != nil {
-		return fmt.Errorf("failed to get status for pod %q - %v", podFullName, err)
+	// Pod workers periodically write status to statusManager. If status is not
+	// cached there, something is wrong (or kubelet just restarted and hasn't
+	// caught up yet). Just assume the pod is not ready yet.
+	podStatus, found := kl.statusManager.GetPodStatus(podFullName)
+	if !found {
+		return fmt.Errorf("failed to get status for pod %q", podFullName)
 	}
 	if err := kl.validatePodPhase(&podStatus); err != nil {
 		// No log is available if pod is not in a "known" phase (e.g. Unknown).
@@ -1913,22 +1916,6 @@ func getPodReadyCondition(spec *api.PodSpec, statuses []api.ContainerStatus) []a
 	return ready
 }
 
-// getPodStatus returns information of the containers in the pod from the
-// container runtime.
-func (kl *Kubelet) getPodStatus(podFullName string) (api.PodStatus, error) {
-	// Check to see if we have a cached version of the status.
-	cachedPodStatus, found := kl.statusManager.GetPodStatus(podFullName)
-	if found {
-		glog.V(3).Infof("Returning cached status for %q", podFullName)
-		return cachedPodStatus, nil
-	}
-	pod, found := kl.GetPodByFullName(podFullName)
-	if !found {
-		return api.PodStatus{}, fmt.Errorf("couldn't find pod %q", podFullName)
-	}
-	return kl.generatePodStatus(pod)
-}
-
 // By passing the pod directly, this method avoids pod lookup, which requires
 // grabbing a lock.
 func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
@@ -3010,19 +3010,9 @@ func TestHandlePortConflicts(t *testing.T) {
 
 	kl.handleNotFittingPods(pods)
 	// Check pod status stored in the status map.
-	status, err := kl.getPodStatus(conflictedPodName)
-	if err != nil {
-		t.Fatalf("status of pod %q is not found in the status map: %#v", conflictedPodName, err)
-	}
-	if status.Phase != api.PodFailed {
-		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
-	}
-
-	// Check if we can retrieve the pod status from GetPodStatus().
-	kl.podManager.SetPods(pods)
-	status, err = kl.getPodStatus(conflictedPodName)
-	if err != nil {
-		t.Fatalf("unable to retrieve pod status for pod %q: %#v.", conflictedPodName, err)
+	status, found := kl.statusManager.GetPodStatus(conflictedPodName)
+	if !found {
+		t.Fatalf("status of pod %q is not found in the status map", conflictedPodName)
 	}
 	if status.Phase != api.PodFailed {
 		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
@@ -3062,19 +3052,9 @@ func TestHandleNodeSelector(t *testing.T) {
 
 	kl.handleNotFittingPods(pods)
 	// Check pod status stored in the status map.
-	status, err := kl.getPodStatus(notfittingPodName)
-	if err != nil {
-		t.Fatalf("status of pod %q is not found in the status map: %#v", notfittingPodName, err)
-	}
-	if status.Phase != api.PodFailed {
-		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
-	}
-
-	// Check if we can retrieve the pod status from GetPodStatus().
-	kl.podManager.SetPods(pods)
-	status, err = kl.getPodStatus(notfittingPodName)
-	if err != nil {
-		t.Fatalf("unable to retrieve pod status for pod %q: %#v.", notfittingPodName, err)
+	status, found := kl.statusManager.GetPodStatus(notfittingPodName)
+	if !found {
+		t.Fatalf("status of pod %q is not found in the status map", notfittingPodName)
 	}
 	if status.Phase != api.PodFailed {
 		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
@@ -3120,19 +3100,9 @@ func TestHandleMemExceeded(t *testing.T) {
 
 	kl.handleNotFittingPods(pods)
 	// Check pod status stored in the status map.
-	status, err := kl.getPodStatus(notfittingPodName)
-	if err != nil {
-		t.Fatalf("status of pod %q is not found in the status map: %#v", notfittingPodName, err)
-	}
-	if status.Phase != api.PodFailed {
-		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
-	}
-
-	// Check if we can retrieve the pod status from GetPodStatus().
-	kl.podManager.SetPods(pods)
-	status, err = kl.getPodStatus(notfittingPodName)
-	if err != nil {
-		t.Fatalf("unable to retrieve pod status for pod %q: %#v.", notfittingPodName, err)
+	status, found := kl.statusManager.GetPodStatus(notfittingPodName)
+	if !found {
+		t.Fatalf("status of pod %q is not found in the status map", notfittingPodName)
 	}
 	if status.Phase != api.PodFailed {
 		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
@@ -3153,13 +3123,13 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
 	}
 	// Run once to populate the status map.
 	kl.handleNotFittingPods(pods)
-	if _, err := kl.getPodStatus(kubecontainer.BuildPodFullName("pod2", "")); err != nil {
-		t.Fatalf("expected to have status cached for %q: %v", "pod2", err)
+	if _, found := kl.statusManager.GetPodStatus(kubecontainer.BuildPodFullName("pod2", "")); !found {
+		t.Fatalf("expected to have status cached for pod2")
 	}
 	// Sync with empty pods so that the entry in status map will be removed.
 	kl.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
-	if _, err := kl.getPodStatus(kubecontainer.BuildPodFullName("pod2", "")); err == nil {
-		t.Fatalf("expected to not have status cached for %q: %v", "pod2", err)
+	if _, found := kl.statusManager.GetPodStatus(kubecontainer.BuildPodFullName("pod2", "")); found {
+		t.Fatalf("expected to not have status cached for pod2")
 	}
 }
 
@@ -4165,11 +4135,11 @@ func TestGetPodStatusWithLastTermination(t *testing.T) {
 		t.Errorf("%d: unexpected error: %v", i, err)
 	}
 
-	// Check if we can retrieve the pod status from GetPodStatus().
+	// Check if we can retrieve the pod status.
 	podName := kubecontainer.GetPodFullName(pods[0])
-	status, err := kubelet.getPodStatus(podName)
-	if err != nil {
-		t.Fatalf("unable to retrieve pod status for pod %q: %#v.", podName, err)
+	status, found := kubelet.statusManager.GetPodStatus(podName)
+	if !found {
+		t.Fatalf("unable to retrieve pod status for pod %q.", podName)
 	} else {
 		terminatedContainers := []string{}
 		for _, cs := range status.ContainerStatuses {
@@ -4240,9 +4210,9 @@ func TestGetPodCreationFailureReason(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}
 
-	status, err := kubelet.getPodStatus(kubecontainer.GetPodFullName(pod))
-	if err != nil {
-		t.Errorf("unexpected error %v", err)
+	status, found := kubelet.statusManager.GetPodStatus(kubecontainer.GetPodFullName(pod))
+	if !found {
+		t.Fatalf("unexpected error %v", err)
 	}
 	if len(status.ContainerStatuses) < 1 {
 		t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses))
@@ -4306,9 +4276,9 @@ func TestGetPodPullImageFailureReason(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}
 
-	status, err := kubelet.getPodStatus(kubecontainer.GetPodFullName(pod))
-	if err != nil {
-		t.Errorf("unexpected error %v", err)
+	status, found := kubelet.statusManager.GetPodStatus(kubecontainer.GetPodFullName(pod))
+	if !found {
+		t.Errorf("expected status of pod %q to be found", kubecontainer.GetPodFullName(pod))
 	}
 	if len(status.ContainerStatuses) < 1 {
 		t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses))