Merge pull request #20205 from Random-Liu/remove-old-convert-function-and-reason-cache

Auto commit by PR queue bot
k8s-merge-robot 2016-02-05 13:15:36 -08:00
commit b4c16ef9eb
7 changed files with 4 additions and 313 deletions

View File

@@ -245,15 +245,6 @@ func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodS
return &status, f.Err
}
func (f *FakeRuntime) ConvertPodStatusToAPIPodStatus(_ *api.Pod, _ *PodStatus) (*api.PodStatus, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "ConvertPodStatusToAPIPodStatus")
status := f.APIPodStatus
return &status, f.Err
}
func (f *FakeRuntime) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
f.Lock()
defer f.Unlock()

View File

@@ -72,12 +72,6 @@ type Runtime interface {
// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error)
// ConvertPodStatusToAPIPodStatus converts the PodStatus object to api.PodStatus.
// This function is needed because Docker generates some high-level and/or
// pod-level information for api.PodStatus (e.g., check whether the image
// exists to determine the reason). We should try generalizing the logic
// for all container runtimes in kubelet and remove this function.
ConvertPodStatusToAPIPodStatus(*api.Pod, *PodStatus) (*api.PodStatus, error)
// PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary.
PullImage(image ImageSpec, pullSecrets []api.Secret) error

View File

@@ -82,11 +82,6 @@ func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus,
return args.Get(0).(*PodStatus), args.Error(1)
}
func (r *Mock) ConvertPodStatusToAPIPodStatus(pod *api.Pod, podStatus *PodStatus) (*api.PodStatus, error) {
args := r.Called(pod, podStatus)
return args.Get(0).(*api.PodStatus), args.Error(1)
}
func (r *Mock) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
args := r.Called(containerID, cmd, stdin, stdout, stderr, tty)
return args.Error(0)

View File

@@ -21,8 +21,6 @@ import (
"strings"
docker "github.com/fsouza/go-dockerclient"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
@@ -83,31 +81,3 @@ func toRuntimeImage(image *docker.APIImages) (*kubecontainer.Image, error) {
Size: image.VirtualSize,
}, nil
}
// convert ContainerStatus to api.ContainerStatus.
func containerStatusToAPIContainerStatus(containerStatus *kubecontainer.ContainerStatus) *api.ContainerStatus {
containerID := DockerPrefix + containerStatus.ID.ID
status := api.ContainerStatus{
Name: containerStatus.Name,
RestartCount: containerStatus.RestartCount,
Image: containerStatus.Image,
ImageID: containerStatus.ImageID,
ContainerID: containerID,
}
switch containerStatus.State {
case kubecontainer.ContainerStateRunning:
status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(containerStatus.StartedAt)}
case kubecontainer.ContainerStateExited:
status.State.Terminated = &api.ContainerStateTerminated{
ExitCode: containerStatus.ExitCode,
Reason: containerStatus.Reason,
Message: containerStatus.Message,
StartedAt: unversioned.NewTime(containerStatus.StartedAt),
FinishedAt: unversioned.NewTime(containerStatus.FinishedAt),
ContainerID: containerID,
}
default:
status.State.Waiting = &api.ContainerStateWaiting{}
}
return &status
}
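For illustration, a minimal usage sketch of the helper removed above (a sketch assuming the same package as this file; the container values are hypothetical, and "time" must be imported):

	// Convert a running container's runtime status to an api.ContainerStatus.
	cs := containerStatusToAPIContainerStatus(&kubecontainer.ContainerStatus{
		ID:        kubecontainer.ContainerID{Type: "docker", ID: "abc123"},
		Name:      "web",
		State:     kubecontainer.ContainerStateRunning,
		StartedAt: time.Now(),
		Image:     "nginx:1.9",
	})
	// cs.ContainerID is prefixed with DockerPrefix (e.g. "docker://abc123") and,
	// because the state is running, cs.State.Running carries the start time.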

View File

@@ -25,7 +25,6 @@ import (
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"sync"
@@ -34,7 +33,6 @@ import (
"github.com/coreos/go-semver/semver"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"github.com/golang/groupcache/lru"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
@@ -55,7 +53,6 @@ import (
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
)
@@ -64,8 +61,6 @@ const (
MinimumDockerAPIVersion = "1.18"
maxReasonCacheEntries = 200
// ndots specifies the minimum number of dots that a domain name must contain for the resolver to consider it as FQDN (fully-qualified)
// we want SRV lookup names like _dns._udp.kube-dns.default.svc to be considered relative.
// hence, setting ndots to be 5.
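For context on the constant above, the resolver rule that ndots controls can be sketched as a simple dot count (an illustrative standalone snippet, not code from this PR):

	package main

	import (
		"fmt"
		"strings"
	)

	// looksFullyQualified mirrors the ndots rule: a name with fewer than ndots
	// dots is first resolved against the search domains, i.e. treated as relative.
	func looksFullyQualified(name string, ndots int) bool {
		return strings.Count(name, ".") >= ndots
	}

	func main() {
		// "_dns._udp.kube-dns.default.svc" has 4 dots, so with ndots=5 it is
		// considered relative, which is exactly what the comment above intends.
		fmt.Println(looksFullyQualified("_dns._udp.kube-dns.default.svc", 5)) // false
	}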
@@ -100,15 +95,7 @@ type DockerManager struct {
// The image name of the pod infra container.
podInfraContainerImage string
// reasonCache stores the failure reason of the last container creation
// and/or start in a string, keyed by <pod_UID>_<container_name>. The goal
// is to propagate this reason to the container status. This endeavor is
// "best-effort" for two reasons:
// 1. The cache is not persisted.
// 2. We use an LRU cache to avoid extra garbage collection work. This
// means that some entries may be recycled before a pod has been
// deleted.
reasonCache reasonInfoCache
// TODO(yifan): Record the pull failure so we can eliminate the image checking?
// Lower level docker image puller.
dockerPuller DockerPuller
@@ -187,8 +174,6 @@ func NewDockerManager(
glog.Infof("Setting dockerRoot to %s", dockerRoot)
}
reasonCache := reasonInfoCache{cache: lru.New(maxReasonCacheEntries)}
dm := &DockerManager{
client: client,
recorder: recorder,
@@ -196,7 +181,6 @@ func NewDockerManager(
os: osInterface,
machineInfo: machineInfo,
podInfraContainerImage: podInfraContainerImage,
reasonCache: reasonCache,
dockerPuller: newDockerPuller(client, qps, burst),
dockerRoot: dockerRoot,
containerLogsDir: containerLogsDir,
@@ -220,43 +204,6 @@ func NewDockerManager(
return dm
}
// A cache which stores failure reasons and messages, keyed by <pod_UID>_<container_name>.
type reasonInfoCache struct {
lock sync.RWMutex
cache *lru.Cache
}
type reasonInfo struct {
reason string
message string
}
func (sc *reasonInfoCache) composeKey(uid types.UID, name string) string {
return fmt.Sprintf("%s_%s", uid, name)
}
func (sc *reasonInfoCache) Add(uid types.UID, name string, reason, message string) {
sc.lock.Lock()
defer sc.lock.Unlock()
sc.cache.Add(sc.composeKey(uid, name), reasonInfo{reason, message})
}
func (sc *reasonInfoCache) Remove(uid types.UID, name string) {
sc.lock.Lock()
defer sc.lock.Unlock()
sc.cache.Remove(sc.composeKey(uid, name))
}
func (sc *reasonInfoCache) Get(uid types.UID, name string) (reasonInfo, bool) {
sc.lock.RLock()
defer sc.lock.RUnlock()
value, ok := sc.cache.Get(sc.composeKey(uid, name))
if ok {
return value.(reasonInfo), ok
} else {
return reasonInfo{"", ""}, ok
}
}
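A brief usage sketch of this cache, with the lifecycle taken from the code above and the SyncPod hunks below (illustrative only):

	rc := reasonInfoCache{cache: lru.New(maxReasonCacheEntries)}
	// On a failed create/start, SyncPod records the brief error and message:
	rc.Add(pod.UID, container.Name, kubecontainer.ErrRunContainer.Error(), err.Error())
	// ConvertPodStatusToAPIPodStatus later surfaces it in the waiting state:
	if info, ok := rc.Get(pod.UID, container.Name); ok {
		status.State.Waiting = &api.ContainerStateWaiting{Reason: info.reason, Message: info.message}
	}
	// And a successful start clears the entry:
	rc.Remove(pod.UID, container.Name)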
// GetContainerLogs returns logs of a specific container. By
// default, it returns a snapshot of the container log. Set 'follow' to true to
// stream the log. Set 'follow' to false and specify the number of lines (e.g.
@@ -410,112 +357,6 @@ func (dm *DockerManager) inspectContainer(id string, podName, podNamespace strin
return &status, "", nil
}
func (dm *DockerManager) ConvertPodStatusToAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) (*api.PodStatus, error) {
var apiPodStatus api.PodStatus
uid := pod.UID
statuses := make(map[string]*api.ContainerStatus, len(pod.Spec.Containers))
// Create a map of expected containers based on the pod spec.
expectedContainers := make(map[string]api.Container)
for _, container := range pod.Spec.Containers {
expectedContainers[container.Name] = container
}
containerDone := sets.NewString()
apiPodStatus.PodIP = podStatus.IP
for _, containerStatus := range podStatus.ContainerStatuses {
cName := containerStatus.Name
if _, ok := expectedContainers[cName]; !ok {
// This would also ignore the infra container.
continue
}
if containerDone.Has(cName) {
continue
}
status := containerStatusToAPIContainerStatus(containerStatus)
if existing, found := statuses[cName]; found {
existing.LastTerminationState = status.State
containerDone.Insert(cName)
} else {
statuses[cName] = status
}
}
// Handle the containers for which we cannot find any associated active or dead docker containers, or which are in restart backoff
// Fetch old containers statuses from old pod status.
oldStatuses := make(map[string]api.ContainerStatus, len(pod.Spec.Containers))
for _, status := range pod.Status.ContainerStatuses {
oldStatuses[status.Name] = status
}
for _, container := range pod.Spec.Containers {
if containerStatus, found := statuses[container.Name]; found {
reasonInfo, ok := dm.reasonCache.Get(uid, container.Name)
if ok && reasonInfo.reason == kubecontainer.ErrCrashLoopBackOff.Error() {
containerStatus.LastTerminationState = containerStatus.State
containerStatus.State = api.ContainerState{
Waiting: &api.ContainerStateWaiting{
Reason: reasonInfo.reason,
Message: reasonInfo.message,
},
}
}
continue
}
var containerStatus api.ContainerStatus
containerStatus.Name = container.Name
containerStatus.Image = container.Image
if oldStatus, found := oldStatuses[container.Name]; found {
// Some states may be lost due to GC; apply the last observed
// values if possible.
containerStatus.RestartCount = oldStatus.RestartCount
containerStatus.LastTerminationState = oldStatus.LastTerminationState
}
// TODO(dchen1107): docker/docker/issues/8365 to figure out if the image exists
reasonInfo, ok := dm.reasonCache.Get(uid, container.Name)
if !ok {
// Default position for a container: at this point there are no active or
// dead containers and the reasonCache is empty (no entry, or the entry was
// evicted), so it's reasonable to say the container is being created until
// a more accurate reason is logged.
containerStatus.State = api.ContainerState{
Waiting: &api.ContainerStateWaiting{
Reason: fmt.Sprintf("ContainerCreating"),
Message: fmt.Sprintf("Image: %s is ready, container is creating", container.Image),
},
}
} else if reasonInfo.reason == kubecontainer.ErrImagePullBackOff.Error() ||
reasonInfo.reason == kubecontainer.ErrImageInspect.Error() ||
reasonInfo.reason == kubecontainer.ErrImagePull.Error() ||
reasonInfo.reason == kubecontainer.ErrImageNeverPull.Error() {
// mark it as waiting; the reason will be filled in below
containerStatus.State = api.ContainerState{Waiting: &api.ContainerStateWaiting{}}
} else if reasonInfo.reason == kubecontainer.ErrRunContainer.Error() {
// mark it as waiting; the reason will be filled in below
containerStatus.State = api.ContainerState{Waiting: &api.ContainerStateWaiting{}}
}
statuses[container.Name] = &containerStatus
}
apiPodStatus.ContainerStatuses = make([]api.ContainerStatus, 0)
for containerName, status := range statuses {
if status.State.Waiting != nil {
status.State.Running = nil
// For containers in the waiting state, fill in a specific reason if it is recorded.
if reasonInfo, ok := dm.reasonCache.Get(uid, containerName); ok {
status.State.Waiting.Reason = reasonInfo.reason
status.State.Waiting.Message = reasonInfo.message
}
}
apiPodStatus.ContainerStatuses = append(apiPodStatus.ContainerStatuses, *status)
}
// Sort the container statuses since clients of this interface expect the list
// of containers in a pod to behave like the output of `docker list`, which has a
// deterministic order.
sort.Sort(kubetypes.SortedContainerStatuses(apiPodStatus.ContainerStatuses))
return &apiPodStatus, nil
}
// makeEnvList converts EnvVar list to a list of strings, in the form of
// '<key>=<value>', which can be understood by docker.
func makeEnvList(envs []kubecontainer.EnvVar) (result []string) {
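The body of makeEnvList is elided by this hunk; a minimal sketch of the conversion its comment describes (the actual implementation may differ):

	func makeEnvListSketch(envs []kubecontainer.EnvVar) []string {
		result := make([]string, 0, len(envs))
		for _, env := range envs {
			// docker expects flat NAME=value strings.
			result = append(result, fmt.Sprintf("%s=%s", env.Name, env.Value))
		}
		return result
	}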
@@ -1790,20 +1631,6 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
}, nil
}
// updateReasonCache updates the failure reason based on the registered error.
func (dm *DockerManager) updateReasonCache(pod *api.Pod, container *api.Container, briefError string, err error) {
if briefError == "" || err == nil {
return
}
errString := err.Error()
dm.reasonCache.Add(pod.UID, container.Name, briefError, errString)
}
// clearReasonCache removes the entry in the reason cache.
func (dm *DockerManager) clearReasonCache(pod *api.Pod, container *api.Container) {
dm.reasonCache.Remove(pod.UID, container.Name)
}
// Sync the running pod to match the specified desired pod.
func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) (result kubecontainer.PodSyncResult) {
start := time.Now()
@@ -1944,13 +1771,11 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
err, msg := dm.imagePuller.PullImage(pod, container, pullSecrets)
if err != nil {
startContainerResult.Fail(err, msg)
dm.updateReasonCache(pod, container, err.Error(), errors.New(msg))
continue
}
if container.SecurityContext != nil && container.SecurityContext.RunAsNonRoot != nil && *container.SecurityContext.RunAsNonRoot {
err := dm.verifyNonRoot(container)
dm.updateReasonCache(pod, container, kubecontainer.ErrVerifyNonRoot.Error(), err)
if err != nil {
startContainerResult.Fail(kubecontainer.ErrVerifyNonRoot, err.Error())
glog.Errorf("Error running pod %q container %q: %v", format.Pod(pod), container.Name, err)
@@ -1971,7 +1796,6 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
// See createPodInfraContainer for infra container setup.
namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID)
_, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, getPidMode(pod), restartCount)
dm.updateReasonCache(pod, container, kubecontainer.ErrRunContainer.Error(), err)
if err != nil {
startContainerResult.Fail(kubecontainer.ErrRunContainer, err.Error())
// TODO(bburns) : Perhaps blacklist a container after N failures?
@@ -1979,8 +1803,6 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
continue
}
// Successfully started the container; clear the entry in the failure
// reason cache.
dm.clearReasonCache(pod, container)
}
return
}
@@ -2063,14 +1885,12 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt
dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container")
}
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, format.Pod(pod))
dm.updateReasonCache(pod, container, kubecontainer.ErrCrashLoopBackOff.Error(), err)
glog.Infof("%s", err.Error())
return true, kubecontainer.ErrCrashLoopBackOff, err.Error()
}
backOff.Next(stableName, ts)
}
dm.clearReasonCache(pod, container)
return false, nil, ""
}
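The util.Backoff calls visible in this hunk outline the gating pattern; a simplified sketch follows (the backoff-window check itself is elided from the diff, so its exact form here is an assumption):

	backOff := util.NewBackOff(time.Second, time.Minute) // initial and max delay, as in runSyncPod below
	key := "podfullname_web" // stableName: a stable per-container key
	ts := time.Now()         // when the failed container finished
	inBackOff := false       // elided check: whether ts falls inside the current window
	if inBackOff {
		// Refuse to restart and surface the current delay, as the code above does:
		glog.Infof("Back-off %s restarting failed container", backOff.Get(key))
	} else {
		// Outside the window: record the event so the next delay grows.
		backOff.Next(key, ts)
	}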

View File

@@ -550,17 +550,14 @@ func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, p
if err != nil {
t.Errorf("unexpected error: %v", err)
}
var apiPodStatus *api.PodStatus
apiPodStatus, err = dm.ConvertPodStatusToAPIPodStatus(pod, podStatus)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
fakeDocker.ClearCalls()
if backOff == nil {
backOff = util.NewBackOff(time.Second, time.Minute)
}
result := dm.SyncPod(pod, *apiPodStatus, podStatus, []api.Secret{}, backOff)
// TODO(random-liu): Add test for PodSyncResult
// api.PodStatus is no longer used in SyncPod; pass in an empty one.
result := dm.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
err = result.Error()
if err != nil && !expectErr {
t.Errorf("unexpected error: %v", err)

View File

@@ -25,7 +25,6 @@ import (
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"syscall"
@@ -39,7 +38,6 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -1490,77 +1488,3 @@ type sortByRestartCount []*kubecontainer.ContainerStatus
func (s sortByRestartCount) Len() int { return len(s) }
func (s sortByRestartCount) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s sortByRestartCount) Less(i, j int) bool { return s[i].RestartCount < s[j].RestartCount }
// TODO(yifan): Delete this function when the logic is moved to kubelet.
func (r *Runtime) ConvertPodStatusToAPIPodStatus(pod *api.Pod, status *kubecontainer.PodStatus) (*api.PodStatus, error) {
apiPodStatus := &api.PodStatus{PodIP: status.IP}
// Sort in the reverse order of the restart count because the
// latest one will have the largest restart count; see the sketch after this function.
sort.Sort(sort.Reverse(sortByRestartCount(status.ContainerStatuses)))
containerStatuses := make(map[string]*api.ContainerStatus)
for _, c := range status.ContainerStatuses {
var st api.ContainerState
switch c.State {
case kubecontainer.ContainerStateRunning:
st.Running = &api.ContainerStateRunning{
StartedAt: unversioned.NewTime(c.StartedAt),
}
case kubecontainer.ContainerStateExited:
if pod.Spec.RestartPolicy == api.RestartPolicyAlways ||
pod.Spec.RestartPolicy == api.RestartPolicyOnFailure && c.ExitCode != 0 {
// TODO(yifan): Add reason and message.
st.Waiting = &api.ContainerStateWaiting{}
break
}
st.Terminated = &api.ContainerStateTerminated{
ExitCode: c.ExitCode,
StartedAt: unversioned.NewTime(c.StartedAt),
Reason: c.Reason,
Message: c.Message,
// TODO(yifan): Add finishedAt, signal.
ContainerID: c.ID.String(),
}
default:
// Unknown state.
// TODO(yifan): Add reason and message.
st.Waiting = &api.ContainerStateWaiting{}
}
status, ok := containerStatuses[c.Name]
if !ok {
containerStatuses[c.Name] = &api.ContainerStatus{
Name: c.Name,
Image: c.Image,
ImageID: c.ImageID,
ContainerID: c.ID.String(),
RestartCount: c.RestartCount,
State: st,
}
continue
}
// Found multiple statuses for this container; record this one as the last termination state.
if status.LastTerminationState.Waiting == nil &&
status.LastTerminationState.Running == nil &&
status.LastTerminationState.Terminated == nil {
status.LastTerminationState = st
}
}
for _, c := range pod.Spec.Containers {
cs, ok := containerStatuses[c.Name]
if !ok {
cs = &api.ContainerStatus{
Name: c.Name,
Image: c.Image,
// TODO(yifan): Add reason and message.
State: api.ContainerState{Waiting: &api.ContainerStateWaiting{}},
}
}
apiPodStatus.ContainerStatuses = append(apiPodStatus.ContainerStatuses, *cs)
}
return apiPodStatus, nil
}
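To make the reverse restart-count ordering above concrete, a small standalone example (illustrative; mirrors sortByRestartCount with plain ints):

	package main

	import (
		"fmt"
		"sort"
	)

	type byCount []int

	func (s byCount) Len() int           { return len(s) }
	func (s byCount) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
	func (s byCount) Less(i, j int) bool { return s[i] < s[j] }

	func main() {
		counts := []int{0, 2, 1}
		sort.Sort(sort.Reverse(byCount(counts)))
		// [2 1 0]: the latest incarnation (highest restart count) comes first,
		// so later entries become the LastTerminationState.
		fmt.Println(counts)
	}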