From b1f91bd510b680e7d455623225ca2da5804ad3e2 Mon Sep 17 00:00:00 2001 From: Paul Morie Date: Fri, 23 Sep 2016 11:58:44 -0400 Subject: [PATCH] Move Kubelet pod-management code into kubelet_pods.go --- pkg/kubelet/kubelet.go | 1175 +------------------- pkg/kubelet/kubelet_pods.go | 1210 +++++++++++++++++++++ pkg/kubelet/kubelet_pods_test.go | 1264 ++++++++++++++++++++++ pkg/kubelet/kubelet_test.go | 1531 --------------------------- pkg/kubelet/kubelet_volumes_test.go | 302 ++++++ 5 files changed, 2778 insertions(+), 2704 deletions(-) create mode 100644 pkg/kubelet/kubelet_pods.go create mode 100644 pkg/kubelet/kubelet_pods_test.go diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 1a5948ab81..5a5662f37f 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -17,15 +17,11 @@ limitations under the License. package kubelet import ( - "bytes" "fmt" - "io" - "io/ioutil" "net" "net/http" "os" "path" - "path/filepath" "sort" "strings" "sync" @@ -35,17 +31,13 @@ import ( "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" "k8s.io/kubernetes/pkg/api" - utilpod "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apis/componentconfig" kubeExternal "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -53,7 +45,6 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim" "k8s.io/kubernetes/pkg/kubelet/dockertools" - "k8s.io/kubernetes/pkg/kubelet/envvars" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" "k8s.io/kubernetes/pkg/kubelet/images" @@ -73,7 +64,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/sysctl" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" - "k8s.io/kubernetes/pkg/kubelet/util/ioutils" "k8s.io/kubernetes/pkg/kubelet/util/queue" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" "k8s.io/kubernetes/pkg/kubelet/volumemanager" @@ -96,15 +86,10 @@ import ( "k8s.io/kubernetes/pkg/util/procfs" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/term" - utilvalidation "k8s.io/kubernetes/pkg/util/validation" - "k8s.io/kubernetes/pkg/util/validation/field" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/third_party/forked/golang/expansion" ) const ( @@ -1098,21 +1083,6 @@ func (kl *Kubelet) setupDataDirs() error { return nil } -// Get a list of pods that have data directories. -func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) { - podInfos, err := ioutil.ReadDir(kl.getPodsDir()) - if err != nil { - return nil, err - } - pods := []types.UID{} - for i := range podInfos { - if podInfos[i].IsDir() { - pods = append(pods, types.UID(podInfos[i].Name())) - } - } - return pods, nil -} - // Starts garbage collection threads. 
func (kl *Kubelet) StartGarbageCollection() { loggedContainerGCFailure := false @@ -1248,423 +1218,6 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { kl.syncLoop(updates, kl) } -// getActivePods returns non-terminal pods -func (kl *Kubelet) getActivePods() []*api.Pod { - allPods := kl.podManager.GetPods() - activePods := kl.filterOutTerminatedPods(allPods) - return activePods -} - -// makeMounts determines the mount points for the given container. -func makeMounts(pod *api.Pod, podDir string, container *api.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) { - // Kubernetes only mounts on /etc/hosts if : - // - container does not use hostNetwork and - // - container is not an infrastructure(pause) container - // - container is not already mounting on /etc/hosts - // When the pause container is being created, its IP is still unknown. Hence, PodIP will not have been set. - mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork) && len(podIP) > 0 - glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile) - mounts := []kubecontainer.Mount{} - for _, mount := range container.VolumeMounts { - mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath) - vol, ok := podVolumes[mount.Name] - if !ok { - glog.Warningf("Mount cannot be satisfied for container %q, because the volume is missing: %q", container.Name, mount) - continue - } - - relabelVolume := false - // If the volume supports SELinux and it has not been - // relabeled already and it is not a read-only volume, - // relabel it and mark it as labeled - if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled { - vol.SELinuxLabeled = true - relabelVolume = true - } - hostPath, err := volume.GetPath(vol.Mounter) - if err != nil { - return nil, err - } - if mount.SubPath != "" { - hostPath = filepath.Join(hostPath, mount.SubPath) - } - mounts = append(mounts, kubecontainer.Mount{ - Name: mount.Name, - ContainerPath: mount.MountPath, - HostPath: hostPath, - ReadOnly: mount.ReadOnly, - SELinuxRelabel: relabelVolume, - }) - } - if mountEtcHostsFile { - hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain) - if err != nil { - return nil, err - } - mounts = append(mounts, *hostsMount) - } - return mounts, nil -} - -// makeHostsMount makes the mountpoint for the hosts file that the containers -// in a pod are injected with. -func makeHostsMount(podDir, podIP, hostName, hostDomainName string) (*kubecontainer.Mount, error) { - hostsFilePath := path.Join(podDir, "etc-hosts") - if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName); err != nil { - return nil, err - } - return &kubecontainer.Mount{ - Name: "k8s-managed-etc-hosts", - ContainerPath: etcHostsPath, - HostPath: hostsFilePath, - ReadOnly: false, - }, nil -} - -// ensureHostsFile ensures that the given host file has an up-to-date ip, host -// name, and domain name. -func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string) error { - if _, err := os.Stat(fileName); os.IsExist(err) { - glog.V(4).Infof("kubernetes-managed etc-hosts file exits. 
Will not be recreated: %q", fileName) - return nil - } - var buffer bytes.Buffer - buffer.WriteString("# Kubernetes-managed hosts file.\n") - buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost - buffer.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost - buffer.WriteString("fe00::0\tip6-localnet\n") - buffer.WriteString("fe00::0\tip6-mcastprefix\n") - buffer.WriteString("fe00::1\tip6-allnodes\n") - buffer.WriteString("fe00::2\tip6-allrouters\n") - if len(hostDomainName) > 0 { - buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName)) - } else { - buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName)) - } - return ioutil.WriteFile(fileName, buffer.Bytes(), 0644) -} - -func makePortMappings(container *api.Container) (ports []kubecontainer.PortMapping) { - names := make(map[string]struct{}) - for _, p := range container.Ports { - pm := kubecontainer.PortMapping{ - HostPort: int(p.HostPort), - ContainerPort: int(p.ContainerPort), - Protocol: p.Protocol, - HostIP: p.HostIP, - } - - // We need to create some default port name if it's not specified, since - // this is necessary for rkt. - // http://issue.k8s.io/7710 - if p.Name == "" { - pm.Name = fmt.Sprintf("%s-%s:%d", container.Name, p.Protocol, p.ContainerPort) - } else { - pm.Name = fmt.Sprintf("%s-%s", container.Name, p.Name) - } - - // Protect against exposing the same protocol-port more than once in a container. - if _, ok := names[pm.Name]; ok { - glog.Warningf("Port name conflicted, %q is defined more than once", pm.Name) - continue - } - ports = append(ports, pm) - names[pm.Name] = struct{}{} - } - return -} - -// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod, -// given that pod's spec and annotations or returns an error. -func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) { - // TODO(vmarmol): Handle better. - // Cap hostname at 63 chars (specification is 64bytes which is 63 chars and the null terminating char). - clusterDomain := kl.clusterDomain - const hostnameMaxLen = 63 - podAnnotations := pod.Annotations - if podAnnotations == nil { - podAnnotations = make(map[string]string) - } - hostname := pod.Name - if len(pod.Spec.Hostname) > 0 { - if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 { - return "", "", fmt.Errorf("Pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";")) - } - hostname = pod.Spec.Hostname - } else { - hostnameCandidate := podAnnotations[utilpod.PodHostnameAnnotation] - if len(utilvalidation.IsDNS1123Label(hostnameCandidate)) == 0 { - // use hostname annotation, if specified. - hostname = hostnameCandidate - } - } - if len(hostname) > hostnameMaxLen { - hostname = hostname[:hostnameMaxLen] - glog.Errorf("hostname for pod:%q was longer than %d. 
Truncated hostname to :%q", pod.Name, hostnameMaxLen, hostname) - } - - hostDomain := "" - if len(pod.Spec.Subdomain) > 0 { - if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 { - return "", "", fmt.Errorf("Pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";")) - } - hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain) - } else { - subdomainCandidate := pod.Annotations[utilpod.PodSubdomainAnnotation] - if len(utilvalidation.IsDNS1123Label(subdomainCandidate)) == 0 { - hostDomain = fmt.Sprintf("%s.%s.svc.%s", subdomainCandidate, pod.Namespace, clusterDomain) - } - } - return hostname, hostDomain, nil -} - -// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by -// the container runtime to set parameters for launching a container. -func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { - var err error - opts := &kubecontainer.RunContainerOptions{CgroupParent: kl.cgroupRoot} - hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod) - if err != nil { - return nil, err - } - opts.Hostname = hostname - podName := volumehelper.GetUniquePodName(pod) - volumes := kl.volumeManager.GetMountedVolumesForPod(podName) - - opts.PortMappings = makePortMappings(container) - // Docker does not relabel volumes if the container is running - // in the host pid or ipc namespaces so the kubelet must - // relabel the volumes - if pod.Spec.SecurityContext != nil && (pod.Spec.SecurityContext.HostIPC || pod.Spec.SecurityContext.HostPID) { - err = kl.relabelVolumes(pod, volumes) - if err != nil { - return nil, err - } - } - - opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes) - if err != nil { - return nil, err - } - opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP) - if err != nil { - return nil, err - } - - if len(container.TerminationMessagePath) != 0 { - p := kl.getPodContainerDir(pod.UID, container.Name) - if err := os.MkdirAll(p, 0750); err != nil { - glog.Errorf("Error on creating %q: %v", p, err) - } else { - opts.PodContainerDir = p - } - } - - opts.DNS, opts.DNSSearch, err = kl.GetClusterDNS(pod) - if err != nil { - return nil, err - } - - return opts, nil -} - -var masterServices = sets.NewString("kubernetes") - -// getServiceEnvVarMap makes a map[string]string of env vars for services a -// pod in namespace ns should see. -func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { - var ( - serviceMap = make(map[string]*api.Service) - m = make(map[string]string) - ) - - // Get all service resources from the master (via a cache), - // and populate them into service environment variables. - if kl.serviceLister == nil { - // Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars. 
- return m, nil - } - services, err := kl.serviceLister.List(labels.Everything()) - if err != nil { - return m, fmt.Errorf("failed to list services when setting up env vars.") - } - - // project the services in namespace ns onto the master services - for i := range services { - service := services[i] - // ignore services where ClusterIP is "None" or empty - if !api.IsServiceIPSet(service) { - continue - } - serviceName := service.Name - - switch service.Namespace { - // for the case whether the master service namespace is the namespace the pod - // is in, the pod should receive all the services in the namespace. - // - // ordering of the case clauses below enforces this - case ns: - serviceMap[serviceName] = service - case kl.masterServiceNamespace: - if masterServices.Has(serviceName) { - if _, exists := serviceMap[serviceName]; !exists { - serviceMap[serviceName] = service - } - } - } - } - - mappedServices := []*api.Service{} - for key := range serviceMap { - mappedServices = append(mappedServices, serviceMap[key]) - } - - for _, e := range envvars.FromServices(mappedServices) { - m[e.Name] = e.Value - } - return m, nil -} - -// Make the environment variables for a pod in the given namespace. -func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Container, podIP string) ([]kubecontainer.EnvVar, error) { - var result []kubecontainer.EnvVar - // Note: These are added to the docker Config, but are not included in the checksum computed - // by dockertools.BuildDockerName(...). That way, we can still determine whether an - // api.Container is already running by its hash. (We don't want to restart a container just - // because some service changed.) - // - // Note that there is a race between Kubelet seeing the pod and kubelet seeing the service. - // To avoid this users can: (1) wait between starting a service and starting; or (2) detect - // missing service env var and exit and be restarted; or (3) use DNS instead of env vars - // and keep trying to resolve the DNS name of the service (recommended). - serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace) - if err != nil { - return result, err - } - - // Determine the final values of variables: - // - // 1. Determine the final value of each variable: - // a. If the variable's Value is set, expand the `$(var)` references to other - // variables in the .Value field; the sources of variables are the declared - // variables of the container and the service environment variables - // b. If a source is defined for an environment variable, resolve the source - // 2. Create the container's environment in the order variables are declared - // 3. Add remaining service environment vars - var ( - tmpEnv = make(map[string]string) - configMaps = make(map[string]*api.ConfigMap) - secrets = make(map[string]*api.Secret) - mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv) - ) - for _, envVar := range container.Env { - // Accesses apiserver+Pods. - // So, the master may set service env vars, or kubelet may. In case both are doing - // it, we delete the key from the kubelet-generated ones so we don't have duplicate - // env vars. - // TODO: remove this net line once all platforms use apiserver+Pods. 
- delete(serviceEnv, envVar.Name) - - runtimeVal := envVar.Value - if runtimeVal != "" { - // Step 1a: expand variable references - runtimeVal = expansion.Expand(runtimeVal, mappingFunc) - } else if envVar.ValueFrom != nil { - // Step 1b: resolve alternate env var sources - switch { - case envVar.ValueFrom.FieldRef != nil: - runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP) - if err != nil { - return result, err - } - case envVar.ValueFrom.ResourceFieldRef != nil: - defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container) - if err != nil { - return result, err - } - runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer) - if err != nil { - return result, err - } - case envVar.ValueFrom.ConfigMapKeyRef != nil: - name := envVar.ValueFrom.ConfigMapKeyRef.Name - key := envVar.ValueFrom.ConfigMapKeyRef.Key - configMap, ok := configMaps[name] - if !ok { - if kl.kubeClient == nil { - return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name) - } - configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name) - if err != nil { - return result, err - } - configMaps[name] = configMap - } - runtimeVal, ok = configMap.Data[key] - if !ok { - return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name) - } - case envVar.ValueFrom.SecretKeyRef != nil: - name := envVar.ValueFrom.SecretKeyRef.Name - key := envVar.ValueFrom.SecretKeyRef.Key - secret, ok := secrets[name] - if !ok { - if kl.kubeClient == nil { - return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name) - } - secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name) - if err != nil { - return result, err - } - secrets[name] = secret - } - runtimeValBytes, ok := secret.Data[key] - if !ok { - return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name) - } - runtimeVal = string(runtimeValBytes) - } - } - - tmpEnv[envVar.Name] = runtimeVal - result = append(result, kubecontainer.EnvVar{Name: envVar.Name, Value: tmpEnv[envVar.Name]}) - } - - // Append remaining service env vars. - for k, v := range serviceEnv { - result = append(result, kubecontainer.EnvVar{Name: k, Value: v}) - } - return result, nil -} - -// podFieldSelectorRuntimeValue returns the runtime value of the given -// selector for a pod. 
-func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *api.ObjectFieldSelector, pod *api.Pod, podIP string) (string, error) { - internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") - if err != nil { - return "", err - } - switch internalFieldPath { - case "spec.nodeName": - return pod.Spec.NodeName, nil - case "spec.serviceAccountName": - return pod.Spec.ServiceAccountName, nil - case "status.podIP": - return podIP, nil - } - return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath) -} - -// containerResourceRuntimeValue returns the value of the provided container resource -func containerResourceRuntimeValue(fs *api.ResourceFieldSelector, pod *api.Pod, container *api.Container) (string, error) { - containerName := fs.ContainerName - if len(containerName) == 0 { - return fieldpath.ExtractContainerResourceValue(fs, container) - } else { - return fieldpath.ExtractResourceValueByContainerName(fs, pod, containerName) - } -} - // GetClusterDNS returns a list of the DNS servers and a list of the DNS search // domains of the cluster. func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) { @@ -1725,33 +1278,6 @@ func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) { return dns, dnsSearch, nil } -// One of the following arguments must be non-nil: runningPod, status. -// TODO: Modify containerRuntime.KillPod() to accept the right arguments. -func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error { - var p kubecontainer.Pod - if runningPod != nil { - p = *runningPod - } else if status != nil { - p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status) - } - return kl.containerRuntime.KillPod(pod, p, gracePeriodOverride) -} - -// makePodDataDirs creates the dirs for the pod datas. -func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error { - uid := pod.UID - if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) { - return err - } - if err := os.MkdirAll(kl.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) { - return err - } - if err := os.MkdirAll(kl.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) { - return err - } - return nil -} - // syncPod is the transaction script for the sync of a single pod. // // Arguments: @@ -1921,32 +1447,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { return nil } -// returns whether the pod uses the host network namespace. -func podUsesHostNetwork(pod *api.Pod) bool { - return pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork -} - -// getPullSecretsForPod inspects the Pod and retrieves the referenced pull -// secrets. -// TODO: duplicate secrets are being retrieved multiple times and there -// is no cache. Creating and using a secret manager interface will make this -// easier to address. -func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) { - pullSecrets := []api.Secret{} - - for _, secretRef := range pod.Spec.ImagePullSecrets { - secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name) - if err != nil { - glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err) - continue - } - - pullSecrets = append(pullSecrets, *secret) - } - - return pullSecrets, nil -} - // Get pods which should be resynchronized. 
Currently, the following pod should be resynchronized: // * pod whose work is ready. // * internal modules that request sync of a pod. @@ -1974,50 +1474,6 @@ func (kl *Kubelet) getPodsToSync() []*api.Pod { return podsToSync } -// Returns true if pod is in the terminated state ("Failed" or "Succeeded"). -func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool { - var status api.PodStatus - // Check the cached pod status which was set after the last sync. - status, ok := kl.statusManager.GetPodStatus(pod.UID) - if !ok { - // If there is no cached status, use the status from the - // apiserver. This is useful if kubelet has recently been - // restarted. - status = pod.Status - } - if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded { - return true - } - - return false -} - -// filterOutTerminatedPods returns the given pods which the status manager -// does not consider failed or succeeded. -func (kl *Kubelet) filterOutTerminatedPods(pods []*api.Pod) []*api.Pod { - var filteredPods []*api.Pod - for _, p := range pods { - if kl.podIsTerminated(p) { - continue - } - filteredPods = append(filteredPods, p) - } - return filteredPods -} - -// removeOrphanedPodStatuses removes obsolete entries in podStatus where -// the pod is no longer considered bound to this node. -func (kl *Kubelet) removeOrphanedPodStatuses(pods []*api.Pod, mirrorPods []*api.Pod) { - podUIDs := make(map[types.UID]bool) - for _, pod := range pods { - podUIDs[pod.UID] = true - } - for _, pod := range mirrorPods { - podUIDs[pod.UID] = true - } - kl.statusManager.RemoveOrphanedStatuses(podUIDs) -} - // deletePod deletes the pod from the internal state of the kubelet by: // 1. stopping the associated pod worker asynchronously // 2. signaling to kill the pod by sending on the podKillingCh channel @@ -2054,135 +1510,6 @@ func (kl *Kubelet) deletePod(pod *api.Pod) error { return nil } -// HandlePodCleanups performs a series of cleanup work, including terminating -// pod workers, killing unwanted pods, and removing orphaned volumes/pod -// directories. -// NOTE: This function is executed by the main sync loop, so it -// should not contain any blocking calls. -func (kl *Kubelet) HandlePodCleanups() error { - allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods() - // Pod phase progresses monotonically. Once a pod has reached a final state, - // it should never leave regardless of the restart policy. The statuses - // of such pods should not be changed, and there is no need to sync them. - // TODO: the logic here does not handle two cases: - // 1. If the containers were removed immediately after they died, kubelet - // may fail to generate correct statuses, let alone filtering correctly. - // 2. If kubelet restarted before writing the terminated status for a pod - // to the apiserver, it could still restart the terminated pod (even - // though the pod was not considered terminated by the apiserver). - // These two conditions could be alleviated by checkpointing kubelet. - activePods := kl.filterOutTerminatedPods(allPods) - - desiredPods := make(map[types.UID]empty) - for _, pod := range activePods { - desiredPods[pod.UID] = empty{} - } - // Stop the workers for no-longer existing pods. - // TODO: is here the best place to forget pod workers? 
- kl.podWorkers.ForgetNonExistingPodWorkers(desiredPods) - kl.probeManager.CleanupPods(activePods) - - runningPods, err := kl.runtimeCache.GetPods() - if err != nil { - glog.Errorf("Error listing containers: %#v", err) - return err - } - for _, pod := range runningPods { - if _, found := desiredPods[pod.ID]; !found { - kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod} - } - } - - kl.removeOrphanedPodStatuses(allPods, mirrorPods) - // Note that we just killed the unwanted pods. This may not have reflected - // in the cache. We need to bypass the cache to get the latest set of - // running pods to clean up the volumes. - // TODO: Evaluate the performance impact of bypassing the runtime cache. - runningPods, err = kl.containerRuntime.GetPods(false) - if err != nil { - glog.Errorf("Error listing containers: %#v", err) - return err - } - - // Remove any orphaned volumes. - // Note that we pass all pods (including terminated pods) to the function, - // so that we don't remove volumes associated with terminated but not yet - // deleted pods. - err = kl.cleanupOrphanedPodDirs(allPods, runningPods) - if err != nil { - // We want all cleanup tasks to be run even if one of them failed. So - // we just log an error here and continue other cleanup tasks. - // This also applies to the other clean up tasks. - glog.Errorf("Failed cleaning up orphaned pod directories: %v", err) - } - - // Remove any orphaned mirror pods. - kl.podManager.DeleteOrphanedMirrorPods() - - // Clear out any old bandwidth rules - err = kl.cleanupBandwidthLimits(allPods) - if err != nil { - glog.Errorf("Failed cleaning up bandwidth limits: %v", err) - } - - kl.backOff.GC() - return nil -} - -// podKiller launches a goroutine to kill a pod received from the channel if -// another goroutine isn't already in action. -func (kl *Kubelet) podKiller() { - killing := sets.NewString() - resultCh := make(chan types.UID) - defer close(resultCh) - for { - select { - case podPair, ok := <-kl.podKillingCh: - if !ok { - return - } - - runningPod := podPair.RunningPod - apiPod := podPair.APIPod - - if killing.Has(string(runningPod.ID)) { - // The pod is already being killed. - break - } - killing.Insert(string(runningPod.ID)) - go func(apiPod *api.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) { - defer func() { - ch <- runningPod.ID - }() - glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name) - err := kl.killPod(apiPod, runningPod, nil, nil) - if err != nil { - glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err) - } - }(apiPod, runningPod, resultCh) - - case podID := <-resultCh: - killing.Delete(string(podID)) - } - } -} - -// checkHostPortConflicts detects pods with conflicted host ports. -func hasHostPortConflicts(pods []*api.Pod) bool { - ports := sets.String{} - for _, pod := range pods { - if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 { - glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs) - return true - } - if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 { - glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs) - return true - } - } - return false -} - // handleOutOfDisk detects if pods can't fit due to lack of disk space. func (kl *Kubelet) isOutOfDisk() bool { // Check disk space once globally and reject or accept all new pods. 
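The podKiller loop removed just above is a small dispatch pattern worth calling out before it reappears in kubelet_pods.go: a single goroutine owns the set of pod IDs currently being killed, drops duplicate kill requests, and frees an ID only when the per-pod goroutine reports back on a result channel. The sketch below reproduces that pattern in isolation; killRequest, killer, and the kill callback are illustrative stand-ins, not kubelet types (the original uses sets.String and kubecontainer.PodPair in the same roles).

package main

import (
	"fmt"
	"sync"
)

type killRequest struct{ id string }

// killer mirrors the podKiller loop: one goroutine owns the "in flight" set,
// duplicate requests are dropped, and a completed kill frees the ID for reuse.
func killer(requests <-chan killRequest, kill func(id string) error) {
	inFlight := map[string]bool{} // IDs with a kill goroutine in flight
	done := make(chan string)     // kill goroutines report their ID here
	for {
		select {
		case req, ok := <-requests:
			if !ok {
				for len(inFlight) > 0 { // drain outstanding kills, then exit
					delete(inFlight, <-done)
				}
				return
			}
			if inFlight[req.id] {
				break // already being killed; break leaves the select, not the loop
			}
			inFlight[req.id] = true
			go func(id string) {
				defer func() { done <- id }()
				if err := kill(id); err != nil {
					fmt.Printf("failed killing %q: %v\n", id, err)
				}
			}(req.id)
		case id := <-done:
			delete(inFlight, id) // finished; allow future kills of this ID
		}
	}
}

func main() {
	reqs := make(chan killRequest)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		killer(reqs, func(id string) error { fmt.Println("killing", id); return nil })
	}()
	reqs <- killRequest{id: "pod-a"}
	reqs <- killRequest{id: "pod-a"} // duplicate; dropped if the first kill is still in flight
	close(reqs)
	wg.Wait()
}

The net effect, as in the kubelet, is at most one kill goroutine per pod ID at any time, without holding a lock across the (potentially slow) kill call itself.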
@@ -2220,7 +1547,6 @@ func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) { // can be admitted, a brief single-word reason and a message explaining why // the pod cannot be admitted. func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, string) { - // the kubelet will invoke each pod admit handler in sequence // if any handler rejects, the pod is rejected. // TODO: move out of disk check into a pod admitter @@ -2546,102 +1872,6 @@ func (kl *Kubelet) PLEGHealthCheck() (bool, error) { return kl.pleg.Healthy() } -// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state -// of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current -// running container is preferred over a previous termination. If info about the container is not available then a specific -// error is returned to the end user. -func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) { - var cID string - - cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName) - // if not found, check the init containers - if !found { - cStatus, found = api.GetContainerStatus(podStatus.InitContainerStatuses, containerName) - } - if !found { - return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName) - } - lastState := cStatus.LastTerminationState - waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated - - switch { - case previous: - if lastState.Terminated == nil { - return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName) - } - cID = lastState.Terminated.ContainerID - - case running != nil: - cID = cStatus.ContainerID - - case terminated != nil: - cID = terminated.ContainerID - - case lastState.Terminated != nil: - cID = lastState.Terminated.ContainerID - - case waiting != nil: - // output some info for the most common pending failures - switch reason := waiting.Reason; reason { - case images.ErrImagePull.Error(): - return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName) - case images.ErrImagePullBackOff.Error(): - return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName) - default: - return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason) - } - default: - // unrecognized state - return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName) - } - - return kubecontainer.ParseContainerID(cID), nil -} - -// GetKubeletContainerLogs returns logs from the container -// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt -// or all of them. -func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { - // Pod workers periodically write status to statusManager. If status is not - // cached there, something is wrong (or kubelet just restarted and hasn't - // caught up yet). Just assume the pod is not ready yet. 
- name, namespace, err := kubecontainer.ParsePodFullName(podFullName) - if err != nil { - return fmt.Errorf("unable to parse pod full name %q: %v", podFullName, err) - } - - pod, ok := kl.GetPodByName(namespace, name) - if !ok { - return fmt.Errorf("pod %q cannot be found - no logs available", name) - } - - podUID := pod.UID - if mirrorPod, ok := kl.podManager.GetMirrorPodByPod(pod); ok { - podUID = mirrorPod.UID - } - podStatus, found := kl.statusManager.GetPodStatus(podUID) - if !found { - // If there is no cached status, use the status from the - // apiserver. This is useful if kubelet has recently been - // restarted. - podStatus = pod.Status - } - - containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous) - if err != nil { - return err - } - - // Do a zero-byte write to stdout before handing off to the container runtime. - // This ensures at least one Write call is made to the writer when copying starts, - // even if we then block waiting for log output from the container. - if _, err := stdout.Write([]byte{}); err != nil { - return err - } - - return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr) -} - // updateRuntimeUp calls the container runtime status callback, initializing // the runtime dependent modules when the container runtime first comes up, // and returns an error if the status check fails. If the status check is OK, @@ -2655,6 +1885,8 @@ func (kl *Kubelet) updateRuntimeUp() { kl.runtimeState.setRuntimeSync(kl.clock.Now()) } +// updateCloudProviderFromMachineInfo updates the node's provider ID field +// from the given cadvisor machine info. func (kl *Kubelet) updateCloudProviderFromMachineInfo(node *api.Node, info *cadvisorapi.MachineInfo) { if info.CloudProvider != cadvisorapi.UnknownProvider && info.CloudProvider != cadvisorapi.Baremetal { @@ -2667,409 +1899,6 @@ func (kl *Kubelet) updateCloudProviderFromMachineInfo(node *api.Node, info *cadv } } -// GetPhase returns the phase of a pod given its container info. -// This func is exported to simplify integration with 3rd party kubelet -// integrations like kubernetes-mesos. 
-func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { - initialized := 0 - pendingInitialization := 0 - failedInitialization := 0 - for _, container := range spec.InitContainers { - containerStatus, ok := api.GetContainerStatus(info, container.Name) - if !ok { - pendingInitialization++ - continue - } - - switch { - case containerStatus.State.Running != nil: - pendingInitialization++ - case containerStatus.State.Terminated != nil: - if containerStatus.State.Terminated.ExitCode == 0 { - initialized++ - } else { - failedInitialization++ - } - case containerStatus.State.Waiting != nil: - if containerStatus.LastTerminationState.Terminated != nil { - if containerStatus.LastTerminationState.Terminated.ExitCode == 0 { - initialized++ - } else { - failedInitialization++ - } - } else { - pendingInitialization++ - } - default: - pendingInitialization++ - } - } - - unknown := 0 - running := 0 - waiting := 0 - stopped := 0 - failed := 0 - succeeded := 0 - for _, container := range spec.Containers { - containerStatus, ok := api.GetContainerStatus(info, container.Name) - if !ok { - unknown++ - continue - } - - switch { - case containerStatus.State.Running != nil: - running++ - case containerStatus.State.Terminated != nil: - stopped++ - if containerStatus.State.Terminated.ExitCode == 0 { - succeeded++ - } else { - failed++ - } - case containerStatus.State.Waiting != nil: - if containerStatus.LastTerminationState.Terminated != nil { - stopped++ - } else { - waiting++ - } - default: - unknown++ - } - } - - if failedInitialization > 0 && spec.RestartPolicy == api.RestartPolicyNever { - return api.PodFailed - } - - switch { - case pendingInitialization > 0: - fallthrough - case waiting > 0: - glog.V(5).Infof("pod waiting > 0, pending") - // One or more containers has not been started - return api.PodPending - case running > 0 && unknown == 0: - // All containers have been started, and at least - // one container is running - return api.PodRunning - case running == 0 && stopped > 0 && unknown == 0: - // All containers are terminated - if spec.RestartPolicy == api.RestartPolicyAlways { - // All containers are in the process of restarting - return api.PodRunning - } - if stopped == succeeded { - // RestartPolicy is not Always, and all - // containers are terminated in success - return api.PodSucceeded - } - if spec.RestartPolicy == api.RestartPolicyNever { - // RestartPolicy is Never, and all containers are - // terminated with at least one in failure - return api.PodFailed - } - // RestartPolicy is OnFailure, and at least one in failure - // and in the process of restarting - return api.PodRunning - default: - glog.V(5).Infof("pod default case, pending") - return api.PodPending - } -} - -// generateAPIPodStatus creates the final API pod status for a pod, given the -// internal pod status. -func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus { - glog.V(3).Infof("Generating status for %q", format.Pod(pod)) - - // check if an internal module has requested the pod is evicted. - for _, podSyncHandler := range kl.PodSyncHandlers { - if result := podSyncHandler.ShouldEvict(pod); result.Evict { - return api.PodStatus{ - Phase: api.PodFailed, - Reason: result.Reason, - Message: result.Message, - } - } - } - - s := kl.convertStatusToAPIStatus(pod, podStatus) - - // Assume info is ready to process - spec := &pod.Spec - allStatus := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) 
- s.Phase = GetPhase(spec, allStatus) - kl.probeManager.UpdatePodStatus(pod.UID, s) - s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase)) - s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase)) - // s (the PodStatus we are creating) will not have a PodScheduled condition yet, because converStatusToAPIStatus() - // does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure - // it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true. - if _, oldPodScheduled := api.GetPodCondition(&pod.Status, api.PodScheduled); oldPodScheduled != nil { - s.Conditions = append(s.Conditions, *oldPodScheduled) - } - api.UpdatePodCondition(&pod.Status, &api.PodCondition{ - Type: api.PodScheduled, - Status: api.ConditionTrue, - }) - - if !kl.standaloneMode { - hostIP, err := kl.getHostIPAnyWay() - if err != nil { - glog.V(4).Infof("Cannot get host IP: %v", err) - } else { - s.HostIP = hostIP.String() - if podUsesHostNetwork(pod) && s.PodIP == "" { - s.PodIP = hostIP.String() - } - } - } - - return *s -} - -// convertStatusToAPIStatus creates an api PodStatus for the given pod from -// the given internal pod status. It is purely transformative and does not -// alter the kubelet state at all. -func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus { - var apiPodStatus api.PodStatus - apiPodStatus.PodIP = podStatus.IP - - apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses( - pod, podStatus, - pod.Status.ContainerStatuses, - pod.Spec.Containers, - len(pod.Spec.InitContainers) > 0, - false, - ) - apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses( - pod, podStatus, - pod.Status.InitContainerStatuses, - pod.Spec.InitContainers, - len(pod.Spec.InitContainers) > 0, - true, - ) - - return &apiPodStatus -} - -// convertToAPIContainerStatuses converts the given internal container -// statuses into API container statuses. -func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubecontainer.PodStatus, previousStatus []api.ContainerStatus, containers []api.Container, hasInitContainers, isInitContainer bool) []api.ContainerStatus { - convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *api.ContainerStatus { - cid := cs.ID.String() - status := &api.ContainerStatus{ - Name: cs.Name, - RestartCount: int32(cs.RestartCount), - Image: cs.Image, - ImageID: cs.ImageID, - ContainerID: cid, - } - switch cs.State { - case kubecontainer.ContainerStateRunning: - status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)} - case kubecontainer.ContainerStateExited: - status.State.Terminated = &api.ContainerStateTerminated{ - ExitCode: int32(cs.ExitCode), - Reason: cs.Reason, - Message: cs.Message, - StartedAt: unversioned.NewTime(cs.StartedAt), - FinishedAt: unversioned.NewTime(cs.FinishedAt), - ContainerID: cid, - } - default: - status.State.Waiting = &api.ContainerStateWaiting{} - } - return status - } - - // Fetch old containers statuses from old pod status. 
- oldStatuses := make(map[string]api.ContainerStatus, len(containers)) - for _, status := range previousStatus { - oldStatuses[status.Name] = status - } - - // Set all container statuses to default waiting state - statuses := make(map[string]*api.ContainerStatus, len(containers)) - defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}} - if hasInitContainers { - defaultWaitingState = api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "PodInitializing"}} - } - - for _, container := range containers { - status := &api.ContainerStatus{ - Name: container.Name, - Image: container.Image, - State: defaultWaitingState, - } - // Apply some values from the old statuses as the default values. - if oldStatus, found := oldStatuses[container.Name]; found { - status.RestartCount = oldStatus.RestartCount - status.LastTerminationState = oldStatus.LastTerminationState - } - statuses[container.Name] = status - } - - // Make the latest container status comes first. - sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses))) - // Set container statuses according to the statuses seen in pod status - containerSeen := map[string]int{} - for _, cStatus := range podStatus.ContainerStatuses { - cName := cStatus.Name - if _, ok := statuses[cName]; !ok { - // This would also ignore the infra container. - continue - } - if containerSeen[cName] >= 2 { - continue - } - status := convertContainerStatus(cStatus) - if containerSeen[cName] == 0 { - statuses[cName] = status - } else { - statuses[cName].LastTerminationState = status.State - } - containerSeen[cName] = containerSeen[cName] + 1 - } - - // Handle the containers failed to be started, which should be in Waiting state. - for _, container := range containers { - if isInitContainer { - // If the init container is terminated with exit code 0, it won't be restarted. - // TODO(random-liu): Handle this in a cleaner way. - s := podStatus.FindContainerStatusByName(container.Name) - if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 { - continue - } - } - // If a container should be restarted in next syncpod, it is *Waiting*. - if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) { - continue - } - status := statuses[container.Name] - reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name) - if !ok { - // In fact, we could also apply Waiting state here, but it is less informative, - // and the container will be restarted soon, so we prefer the original state here. - // Note that with the current implementation of ShouldContainerBeRestarted the original state here - // could be: - // * Waiting: There is no associated historical container and start failure reason record. - // * Terminated: The container is terminated. - continue - } - if status.State.Terminated != nil { - status.LastTerminationState = status.State - } - status.State = api.ContainerState{ - Waiting: &api.ContainerStateWaiting{ - Reason: reason.Error(), - Message: message, - }, - } - statuses[container.Name] = status - } - - var containerStatuses []api.ContainerStatus - for _, status := range statuses { - containerStatuses = append(containerStatuses, *status) - } - - // Sort the container statuses since clients of this interface expect the list - // of containers in a pod has a deterministic order. 
- if isInitContainer { - kubetypes.SortInitContainerStatuses(pod, containerStatuses) - } else { - sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses)) - } - return containerStatuses -} - -// Returns logs of current machine. -func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) { - // TODO: whitelist logs we are willing to serve - kl.logServer.ServeHTTP(w, req) -} - -// findContainer finds and returns the container with the given pod ID, full name, and container name. -// It returns nil if not found. -func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) { - pods, err := kl.containerRuntime.GetPods(false) - if err != nil { - return nil, err - } - pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) - return pod.FindContainerByName(containerName), nil -} - -// Run a command in a container, returns the combined stdout, stderr as an array of bytes -func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) { - podUID = kl.podManager.TranslatePodUID(podUID) - - container, err := kl.findContainer(podFullName, podUID, containerName) - if err != nil { - return nil, err - } - if container == nil { - return nil, fmt.Errorf("container not found (%q)", containerName) - } - - var buffer bytes.Buffer - output := ioutils.WriteCloserWrapper(&buffer) - err = kl.runner.ExecInContainer(container.ID, cmd, nil, output, output, false, nil) - // Even if err is non-nil, there still may be output (e.g. the exec wrote to stdout or stderr but - // the command returned a nonzero exit code). Therefore, always return the output along with the - // error. - return buffer.Bytes(), err -} - -// ExecInContainer executes a command in a container, connecting the supplied -// stdin/stdout/stderr to the command's IO streams. -func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, containerName string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error { - podUID = kl.podManager.TranslatePodUID(podUID) - - container, err := kl.findContainer(podFullName, podUID, containerName) - if err != nil { - return err - } - if container == nil { - return fmt.Errorf("container not found (%q)", containerName) - } - return kl.runner.ExecInContainer(container.ID, cmd, stdin, stdout, stderr, tty, resize) -} - -// AttachContainer uses the container runtime to attach the given streams to -// the given container. -func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error { - podUID = kl.podManager.TranslatePodUID(podUID) - - container, err := kl.findContainer(podFullName, podUID, containerName) - if err != nil { - return err - } - if container == nil { - return fmt.Errorf("container not found (%q)", containerName) - } - return kl.containerRuntime.AttachContainer(container.ID, stdin, stdout, stderr, tty, resize) -} - -// PortForward connects to the pod's port and copies data between the port -// and the stream. 
-func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16, stream io.ReadWriteCloser) error { - podUID = kl.podManager.TranslatePodUID(podUID) - - pods, err := kl.containerRuntime.GetPods(false) - if err != nil { - return err - } - pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) - if pod.IsEmpty() { - return fmt.Errorf("pod not found (%q)", podFullName) - } - return kl.runner.PortForward(&pod, port, stream) -} - // GetConfiguration returns the KubeletConfiguration used to configure the kubelet. func (kl *Kubelet) GetConfiguration() componentconfig.KubeletConfiguration { return kl.kubeletConfiguration diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go new file mode 100644 index 0000000000..d0c7d6068f --- /dev/null +++ b/pkg/kubelet/kubelet_pods.go @@ -0,0 +1,1210 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + utilpod "k8s.io/kubernetes/pkg/api/pod" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/fieldpath" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/envvars" + "k8s.io/kubernetes/pkg/kubelet/images" + "k8s.io/kubernetes/pkg/kubelet/status" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/kubelet/util/ioutils" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/term" + utilvalidation "k8s.io/kubernetes/pkg/util/validation" + "k8s.io/kubernetes/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/third_party/forked/golang/expansion" +) + +// Get a list of pods that have data directories. +func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) { + podInfos, err := ioutil.ReadDir(kl.getPodsDir()) + if err != nil { + return nil, err + } + pods := []types.UID{} + for i := range podInfos { + if podInfos[i].IsDir() { + pods = append(pods, types.UID(podInfos[i].Name())) + } + } + return pods, nil +} + +// getActivePods returns non-terminal pods +func (kl *Kubelet) getActivePods() []*api.Pod { + allPods := kl.podManager.GetPods() + activePods := kl.filterOutTerminatedPods(allPods) + return activePods +} + +// makeMounts determines the mount points for the given container. 
+func makeMounts(pod *api.Pod, podDir string, container *api.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) { + // Kubernetes only mounts on /etc/hosts if : + // - container does not use hostNetwork and + // - container is not an infrastructure(pause) container + // - container is not already mounting on /etc/hosts + // When the pause container is being created, its IP is still unknown. Hence, PodIP will not have been set. + mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork) && len(podIP) > 0 + glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile) + mounts := []kubecontainer.Mount{} + for _, mount := range container.VolumeMounts { + mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath) + vol, ok := podVolumes[mount.Name] + if !ok { + glog.Warningf("Mount cannot be satisfied for container %q, because the volume is missing: %q", container.Name, mount) + continue + } + + relabelVolume := false + // If the volume supports SELinux and it has not been + // relabeled already and it is not a read-only volume, + // relabel it and mark it as labeled + if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled { + vol.SELinuxLabeled = true + relabelVolume = true + } + hostPath, err := volume.GetPath(vol.Mounter) + if err != nil { + return nil, err + } + if mount.SubPath != "" { + hostPath = filepath.Join(hostPath, mount.SubPath) + } + mounts = append(mounts, kubecontainer.Mount{ + Name: mount.Name, + ContainerPath: mount.MountPath, + HostPath: hostPath, + ReadOnly: mount.ReadOnly, + SELinuxRelabel: relabelVolume, + }) + } + if mountEtcHostsFile { + hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain) + if err != nil { + return nil, err + } + mounts = append(mounts, *hostsMount) + } + return mounts, nil +} + +// makeHostsMount makes the mountpoint for the hosts file that the containers +// in a pod are injected with. +func makeHostsMount(podDir, podIP, hostName, hostDomainName string) (*kubecontainer.Mount, error) { + hostsFilePath := path.Join(podDir, "etc-hosts") + if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName); err != nil { + return nil, err + } + return &kubecontainer.Mount{ + Name: "k8s-managed-etc-hosts", + ContainerPath: etcHostsPath, + HostPath: hostsFilePath, + ReadOnly: false, + }, nil +} + +// ensureHostsFile ensures that the given host file has an up-to-date ip, host +// name, and domain name. +func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string) error { + if _, err := os.Stat(fileName); os.IsExist(err) { + glog.V(4).Infof("kubernetes-managed etc-hosts file exits. 
Will not be recreated: %q", fileName) + return nil + } + var buffer bytes.Buffer + buffer.WriteString("# Kubernetes-managed hosts file.\n") + buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost + buffer.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost + buffer.WriteString("fe00::0\tip6-localnet\n") + buffer.WriteString("fe00::0\tip6-mcastprefix\n") + buffer.WriteString("fe00::1\tip6-allnodes\n") + buffer.WriteString("fe00::2\tip6-allrouters\n") + if len(hostDomainName) > 0 { + buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName)) + } else { + buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName)) + } + return ioutil.WriteFile(fileName, buffer.Bytes(), 0644) +} + +func makePortMappings(container *api.Container) (ports []kubecontainer.PortMapping) { + names := make(map[string]struct{}) + for _, p := range container.Ports { + pm := kubecontainer.PortMapping{ + HostPort: int(p.HostPort), + ContainerPort: int(p.ContainerPort), + Protocol: p.Protocol, + HostIP: p.HostIP, + } + + // We need to create some default port name if it's not specified, since + // this is necessary for rkt. + // http://issue.k8s.io/7710 + if p.Name == "" { + pm.Name = fmt.Sprintf("%s-%s:%d", container.Name, p.Protocol, p.ContainerPort) + } else { + pm.Name = fmt.Sprintf("%s-%s", container.Name, p.Name) + } + + // Protect against exposing the same protocol-port more than once in a container. + if _, ok := names[pm.Name]; ok { + glog.Warningf("Port name conflicted, %q is defined more than once", pm.Name) + continue + } + ports = append(ports, pm) + names[pm.Name] = struct{}{} + } + return +} + +// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod, +// given that pod's spec and annotations or returns an error. +func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) { + // TODO(vmarmol): Handle better. + // Cap hostname at 63 chars (specification is 64bytes which is 63 chars and the null terminating char). + clusterDomain := kl.clusterDomain + const hostnameMaxLen = 63 + podAnnotations := pod.Annotations + if podAnnotations == nil { + podAnnotations = make(map[string]string) + } + hostname := pod.Name + if len(pod.Spec.Hostname) > 0 { + if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 { + return "", "", fmt.Errorf("Pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";")) + } + hostname = pod.Spec.Hostname + } else { + hostnameCandidate := podAnnotations[utilpod.PodHostnameAnnotation] + if len(utilvalidation.IsDNS1123Label(hostnameCandidate)) == 0 { + // use hostname annotation, if specified. + hostname = hostnameCandidate + } + } + if len(hostname) > hostnameMaxLen { + hostname = hostname[:hostnameMaxLen] + glog.Errorf("hostname for pod:%q was longer than %d. 
Truncated hostname to :%q", pod.Name, hostnameMaxLen, hostname) + } + + hostDomain := "" + if len(pod.Spec.Subdomain) > 0 { + if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 { + return "", "", fmt.Errorf("Pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";")) + } + hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain) + } else { + subdomainCandidate := pod.Annotations[utilpod.PodSubdomainAnnotation] + if len(utilvalidation.IsDNS1123Label(subdomainCandidate)) == 0 { + hostDomain = fmt.Sprintf("%s.%s.svc.%s", subdomainCandidate, pod.Namespace, clusterDomain) + } + } + return hostname, hostDomain, nil +} + +// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by +// the container runtime to set parameters for launching a container. +func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { + var err error + opts := &kubecontainer.RunContainerOptions{CgroupParent: kl.cgroupRoot} + hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod) + if err != nil { + return nil, err + } + opts.Hostname = hostname + podName := volumehelper.GetUniquePodName(pod) + volumes := kl.volumeManager.GetMountedVolumesForPod(podName) + + opts.PortMappings = makePortMappings(container) + // Docker does not relabel volumes if the container is running + // in the host pid or ipc namespaces so the kubelet must + // relabel the volumes + if pod.Spec.SecurityContext != nil && (pod.Spec.SecurityContext.HostIPC || pod.Spec.SecurityContext.HostPID) { + err = kl.relabelVolumes(pod, volumes) + if err != nil { + return nil, err + } + } + + opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes) + if err != nil { + return nil, err + } + opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP) + if err != nil { + return nil, err + } + + if len(container.TerminationMessagePath) != 0 { + p := kl.getPodContainerDir(pod.UID, container.Name) + if err := os.MkdirAll(p, 0750); err != nil { + glog.Errorf("Error on creating %q: %v", p, err) + } else { + opts.PodContainerDir = p + } + } + + opts.DNS, opts.DNSSearch, err = kl.GetClusterDNS(pod) + if err != nil { + return nil, err + } + + return opts, nil +} + +var masterServices = sets.NewString("kubernetes") + +// getServiceEnvVarMap makes a map[string]string of env vars for services a +// pod in namespace ns should see. +func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { + var ( + serviceMap = make(map[string]*api.Service) + m = make(map[string]string) + ) + + // Get all service resources from the master (via a cache), + // and populate them into service environment variables. + if kl.serviceLister == nil { + // Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars. 
+ return m, nil + } + services, err := kl.serviceLister.List(labels.Everything()) + if err != nil { + return m, fmt.Errorf("failed to list services when setting up env vars.") + } + + // project the services in namespace ns onto the master services + for i := range services { + service := services[i] + // ignore services where ClusterIP is "None" or empty + if !api.IsServiceIPSet(service) { + continue + } + serviceName := service.Name + + switch service.Namespace { + // for the case where the master service namespace is the namespace the pod + // is in, the pod should receive all the services in the namespace. + // + // ordering of the case clauses below enforces this + case ns: + serviceMap[serviceName] = service + case kl.masterServiceNamespace: + if masterServices.Has(serviceName) { + if _, exists := serviceMap[serviceName]; !exists { + serviceMap[serviceName] = service + } + } + } + } + + mappedServices := []*api.Service{} + for key := range serviceMap { + mappedServices = append(mappedServices, serviceMap[key]) + } + + for _, e := range envvars.FromServices(mappedServices) { + m[e.Name] = e.Value + } + return m, nil +} + +// makeEnvironmentVariables makes the environment variables for a pod in the given namespace. +func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Container, podIP string) ([]kubecontainer.EnvVar, error) { + var result []kubecontainer.EnvVar + // Note: These are added to the docker Config, but are not included in the checksum computed + // by dockertools.BuildDockerName(...). That way, we can still determine whether an + // api.Container is already running by its hash. (We don't want to restart a container just + // because some service changed.) + // + // Note that there is a race between Kubelet seeing the pod and kubelet seeing the service. + // To avoid this users can: (1) wait between starting a service and starting the pods that use it; or (2) detect + // missing service env var and exit and be restarted; or (3) use DNS instead of env vars + // and keep trying to resolve the DNS name of the service (recommended). + serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace) + if err != nil { + return result, err + } + + // Determine the final values of variables: + // + // 1. Determine the final value of each variable: + // a. If the variable's Value is set, expand the `$(var)` references to other + // variables in the .Value field; the sources of variables are the declared + // variables of the container and the service environment variables + // b. If a source is defined for an environment variable, resolve the source + // 2. Create the container's environment in the order variables are declared + // 3. Add remaining service environment vars + var ( + tmpEnv = make(map[string]string) + configMaps = make(map[string]*api.ConfigMap) + secrets = make(map[string]*api.Secret) + mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv) + ) + for _, envVar := range container.Env { + // Accesses apiserver+Pods. + // So, the master may set service env vars, or kubelet may. In case both are doing + // it, we delete the key from the kubelet-generated ones so we don't have duplicate + // env vars. + // TODO: remove this next line once all platforms use apiserver+Pods.
+ delete(serviceEnv, envVar.Name) + + runtimeVal := envVar.Value + if runtimeVal != "" { + // Step 1a: expand variable references + runtimeVal = expansion.Expand(runtimeVal, mappingFunc) + } else if envVar.ValueFrom != nil { + // Step 1b: resolve alternate env var sources + switch { + case envVar.ValueFrom.FieldRef != nil: + runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP) + if err != nil { + return result, err + } + case envVar.ValueFrom.ResourceFieldRef != nil: + defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container) + if err != nil { + return result, err + } + runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer) + if err != nil { + return result, err + } + case envVar.ValueFrom.ConfigMapKeyRef != nil: + name := envVar.ValueFrom.ConfigMapKeyRef.Name + key := envVar.ValueFrom.ConfigMapKeyRef.Key + configMap, ok := configMaps[name] + if !ok { + if kl.kubeClient == nil { + return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name) + } + configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name) + if err != nil { + return result, err + } + configMaps[name] = configMap + } + runtimeVal, ok = configMap.Data[key] + if !ok { + return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name) + } + case envVar.ValueFrom.SecretKeyRef != nil: + name := envVar.ValueFrom.SecretKeyRef.Name + key := envVar.ValueFrom.SecretKeyRef.Key + secret, ok := secrets[name] + if !ok { + if kl.kubeClient == nil { + return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name) + } + secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name) + if err != nil { + return result, err + } + secrets[name] = secret + } + runtimeValBytes, ok := secret.Data[key] + if !ok { + return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name) + } + runtimeVal = string(runtimeValBytes) + } + } + + tmpEnv[envVar.Name] = runtimeVal + result = append(result, kubecontainer.EnvVar{Name: envVar.Name, Value: tmpEnv[envVar.Name]}) + } + + // Append remaining service env vars. + for k, v := range serviceEnv { + result = append(result, kubecontainer.EnvVar{Name: k, Value: v}) + } + return result, nil +} + +// podFieldSelectorRuntimeValue returns the runtime value of the given +// selector for a pod. +func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *api.ObjectFieldSelector, pod *api.Pod, podIP string) (string, error) { + internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") + if err != nil { + return "", err + } + switch internalFieldPath { + case "spec.nodeName": + return pod.Spec.NodeName, nil + case "spec.serviceAccountName": + return pod.Spec.ServiceAccountName, nil + case "status.podIP": + return podIP, nil + } + return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath) +} + +// containerResourceRuntimeValue returns the value of the provided container resource +func containerResourceRuntimeValue(fs *api.ResourceFieldSelector, pod *api.Pod, container *api.Container) (string, error) { + containerName := fs.ContainerName + if len(containerName) == 0 { + return fieldpath.ExtractContainerResourceValue(fs, container) + } else { + return fieldpath.ExtractResourceValueByContainerName(fs, pod, containerName) + } +} + +// One of the following arguments must be non-nil: runningPod, status. 
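Step 1a above delegates $(VAR) expansion to the forked expansion package: references are resolved against the variables declared earlier in the container plus the service environment, and unresolved references are left untouched (as the TEST_UNDEFINED case in the tests below expects). This is a simplified, self-contained stand-in for that behaviour, not the actual expansion package.

package main

import (
	"fmt"
	"regexp"
)

var varRef = regexp.MustCompile(`\$\(([A-Za-z_][A-Za-z0-9_]*)\)`)

// expand resolves $(NAME) references against the container-declared variables
// first and the service environment second, leaving unknown references as-is.
func expand(value string, declared, serviceEnv map[string]string) string {
	return varRef.ReplaceAllStringFunc(value, func(ref string) string {
		name := varRef.FindStringSubmatch(ref)[1]
		if v, ok := declared[name]; ok {
			return v
		}
		if v, ok := serviceEnv[name]; ok {
			return v
		}
		return ref
	})
}

func main() {
	declared := map[string]string{"POD_NAME": "dapi-test-pod-name"}
	serviceEnv := map[string]string{"TEST_SERVICE_HOST": "1.2.3.3", "TEST_SERVICE_PORT": "8083"}
	fmt.Println(expand("$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)", declared, serviceEnv)) // 1.2.3.3:8083
	fmt.Println(expand("test2-$(POD_NAME)", declared, serviceEnv))                         // test2-dapi-test-pod-name
	fmt.Println(expand("$(UNDEFINED_VAR)", declared, serviceEnv))                          // $(UNDEFINED_VAR)
}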
+// TODO: Modify containerRuntime.KillPod() to accept the right arguments. +func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error { + var p kubecontainer.Pod + if runningPod != nil { + p = *runningPod + } else if status != nil { + p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status) + } + return kl.containerRuntime.KillPod(pod, p, gracePeriodOverride) +} + +// makePodDataDirs creates the dirs for the pod's data. +func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error { + uid := pod.UID + if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) { + return err + } + if err := os.MkdirAll(kl.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) { + return err + } + if err := os.MkdirAll(kl.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// returns whether the pod uses the host network namespace. +func podUsesHostNetwork(pod *api.Pod) bool { + return pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork +} + +// getPullSecretsForPod inspects the Pod and retrieves the referenced pull +// secrets. +// TODO: duplicate secrets are being retrieved multiple times and there +// is no cache. Creating and using a secret manager interface will make this +// easier to address. +func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) { + pullSecrets := []api.Secret{} + + for _, secretRef := range pod.Spec.ImagePullSecrets { + secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name) + if err != nil { + glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err) + continue + } + + pullSecrets = append(pullSecrets, *secret) + } + + return pullSecrets, nil +} + +// Returns true if the pod is in the terminated state ("Failed" or "Succeeded"). +func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool { + var status api.PodStatus + // Check the cached pod status which was set after the last sync. + status, ok := kl.statusManager.GetPodStatus(pod.UID) + if !ok { + // If there is no cached status, use the status from the + // apiserver. This is useful if kubelet has recently been + // restarted. + status = pod.Status + } + if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded { + return true + } + + return false +} + +// filterOutTerminatedPods returns the given pods which the status manager +// does not consider failed or succeeded. +func (kl *Kubelet) filterOutTerminatedPods(pods []*api.Pod) []*api.Pod { + var filteredPods []*api.Pod + for _, p := range pods { + if kl.podIsTerminated(p) { + continue + } + filteredPods = append(filteredPods, p) + } + return filteredPods +} + +// removeOrphanedPodStatuses removes obsolete entries in podStatus where +// the pod is no longer considered bound to this node. +func (kl *Kubelet) removeOrphanedPodStatuses(pods []*api.Pod, mirrorPods []*api.Pod) { + podUIDs := make(map[types.UID]bool) + for _, pod := range pods { + podUIDs[pod.UID] = true + } + for _, pod := range mirrorPods { + podUIDs[pod.UID] = true + } + kl.statusManager.RemoveOrphanedStatuses(podUIDs) +} + +// HandlePodCleanups performs a series of cleanup tasks, including terminating +// pod workers, killing unwanted pods, and removing orphaned volumes/pod +// directories.
+// NOTE: This function is executed by the main sync loop, so it +// should not contain any blocking calls. +func (kl *Kubelet) HandlePodCleanups() error { + allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods() + // Pod phase progresses monotonically. Once a pod has reached a final state, + // it should never leave regardless of the restart policy. The statuses + // of such pods should not be changed, and there is no need to sync them. + // TODO: the logic here does not handle two cases: + // 1. If the containers were removed immediately after they died, kubelet + // may fail to generate correct statuses, let alone filtering correctly. + // 2. If kubelet restarted before writing the terminated status for a pod + // to the apiserver, it could still restart the terminated pod (even + // though the pod was not considered terminated by the apiserver). + // These two conditions could be alleviated by checkpointing kubelet. + activePods := kl.filterOutTerminatedPods(allPods) + + desiredPods := make(map[types.UID]empty) + for _, pod := range activePods { + desiredPods[pod.UID] = empty{} + } + // Stop the workers for no-longer existing pods. + // TODO: is here the best place to forget pod workers? + kl.podWorkers.ForgetNonExistingPodWorkers(desiredPods) + kl.probeManager.CleanupPods(activePods) + + runningPods, err := kl.runtimeCache.GetPods() + if err != nil { + glog.Errorf("Error listing containers: %#v", err) + return err + } + for _, pod := range runningPods { + if _, found := desiredPods[pod.ID]; !found { + kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod} + } + } + + kl.removeOrphanedPodStatuses(allPods, mirrorPods) + // Note that we just killed the unwanted pods. This may not have reflected + // in the cache. We need to bypass the cache to get the latest set of + // running pods to clean up the volumes. + // TODO: Evaluate the performance impact of bypassing the runtime cache. + runningPods, err = kl.containerRuntime.GetPods(false) + if err != nil { + glog.Errorf("Error listing containers: %#v", err) + return err + } + + // Remove any orphaned volumes. + // Note that we pass all pods (including terminated pods) to the function, + // so that we don't remove volumes associated with terminated but not yet + // deleted pods. + err = kl.cleanupOrphanedPodDirs(allPods, runningPods) + if err != nil { + // We want all cleanup tasks to be run even if one of them failed. So + // we just log an error here and continue other cleanup tasks. + // This also applies to the other clean up tasks. + glog.Errorf("Failed cleaning up orphaned pod directories: %v", err) + } + + // Remove any orphaned mirror pods. + kl.podManager.DeleteOrphanedMirrorPods() + + // Clear out any old bandwidth rules + err = kl.cleanupBandwidthLimits(allPods) + if err != nil { + glog.Errorf("Failed cleaning up bandwidth limits: %v", err) + } + + kl.backOff.GC() + return nil +} + +// podKiller launches a goroutine to kill a pod received from the channel if +// another goroutine isn't already in action. +func (kl *Kubelet) podKiller() { + killing := sets.NewString() + resultCh := make(chan types.UID) + defer close(resultCh) + for { + select { + case podPair, ok := <-kl.podKillingCh: + if !ok { + return + } + + runningPod := podPair.RunningPod + apiPod := podPair.APIPod + + if killing.Has(string(runningPod.ID)) { + // The pod is already being killed. 
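podKiller relies on a small concurrency pattern: a set records pods whose kill is already in flight, and a result channel clears the entry when the goroutine finishes, so duplicate requests for the same pod are dropped. Below is a reduced sketch of that pattern with plain strings; dedupWorker is an illustrative name and is independent of the kubelet types.

package main

import (
	"fmt"
	"time"
)

// dedupWorker starts at most one in-flight job per key: the inFlight set
// drops duplicate requests and the done channel clears entries when a job
// finishes, mirroring the set-plus-result-channel pattern used by podKiller.
func dedupWorker(requests <-chan string, handle func(string)) {
	inFlight := map[string]bool{}
	done := make(chan string)
	for {
		select {
		case key, ok := <-requests:
			if !ok {
				return
			}
			if inFlight[key] {
				break // already being handled; drop the duplicate
			}
			inFlight[key] = true
			go func(k string) {
				defer func() { done <- k }()
				handle(k)
			}(key)
		case key := <-done:
			delete(inFlight, key)
		}
	}
}

func main() {
	requests := make(chan string, 3)
	requests <- "pod-a"
	requests <- "pod-a" // duplicate while the first kill is still running
	requests <- "pod-b"
	go dedupWorker(requests, func(k string) {
		time.Sleep(10 * time.Millisecond)
		fmt.Println("killed", k)
	})
	time.Sleep(100 * time.Millisecond) // prints "killed pod-a" and "killed pod-b" once each
}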
+ break + } + killing.Insert(string(runningPod.ID)) + go func(apiPod *api.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) { + defer func() { + ch <- runningPod.ID + }() + glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name) + err := kl.killPod(apiPod, runningPod, nil, nil) + if err != nil { + glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err) + } + }(apiPod, runningPod, resultCh) + + case podID := <-resultCh: + killing.Delete(string(podID)) + } + } +} + +// checkHostPortConflicts detects pods with conflicted host ports. +func hasHostPortConflicts(pods []*api.Pod) bool { + ports := sets.String{} + for _, pod := range pods { + if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 { + glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs) + return true + } + if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 { + glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs) + return true + } + } + return false +} + +// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state +// of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current +// running container is preferred over a previous termination. If info about the container is not available then a specific +// error is returned to the end user. +func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) { + var cID string + + cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName) + // if not found, check the init containers + if !found { + cStatus, found = api.GetContainerStatus(podStatus.InitContainerStatuses, containerName) + } + if !found { + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName) + } + lastState := cStatus.LastTerminationState + waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated + + switch { + case previous: + if lastState.Terminated == nil { + return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName) + } + cID = lastState.Terminated.ContainerID + + case running != nil: + cID = cStatus.ContainerID + + case terminated != nil: + cID = terminated.ContainerID + + case lastState.Terminated != nil: + cID = lastState.Terminated.ContainerID + + case waiting != nil: + // output some info for the most common pending failures + switch reason := waiting.Reason; reason { + case images.ErrImagePull.Error(): + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName) + case images.ErrImagePullBackOff.Error(): + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName) + default: + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason) + } + default: + // unrecognized state + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, 
podName) + } + + return kubecontainer.ParseContainerID(cID), nil +} + +// GetKubeletContainerLogs returns logs from the container +// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt +// or all of them. +func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { + // Pod workers periodically write status to statusManager. If status is not + // cached there, something is wrong (or kubelet just restarted and hasn't + // caught up yet). Just assume the pod is not ready yet. + name, namespace, err := kubecontainer.ParsePodFullName(podFullName) + if err != nil { + return fmt.Errorf("unable to parse pod full name %q: %v", podFullName, err) + } + + pod, ok := kl.GetPodByName(namespace, name) + if !ok { + return fmt.Errorf("pod %q cannot be found - no logs available", name) + } + + podUID := pod.UID + if mirrorPod, ok := kl.podManager.GetMirrorPodByPod(pod); ok { + podUID = mirrorPod.UID + } + podStatus, found := kl.statusManager.GetPodStatus(podUID) + if !found { + // If there is no cached status, use the status from the + // apiserver. This is useful if kubelet has recently been + // restarted. + podStatus = pod.Status + } + + containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous) + if err != nil { + return err + } + + // Do a zero-byte write to stdout before handing off to the container runtime. + // This ensures at least one Write call is made to the writer when copying starts, + // even if we then block waiting for log output from the container. + if _, err := stdout.Write([]byte{}); err != nil { + return err + } + + return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr) +} + +// GetPhase returns the phase of a pod given its container info. +// This func is exported to simplify integration with 3rd party kubelet +// integrations like kubernetes-mesos. 
+func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { + initialized := 0 + pendingInitialization := 0 + failedInitialization := 0 + for _, container := range spec.InitContainers { + containerStatus, ok := api.GetContainerStatus(info, container.Name) + if !ok { + pendingInitialization++ + continue + } + + switch { + case containerStatus.State.Running != nil: + pendingInitialization++ + case containerStatus.State.Terminated != nil: + if containerStatus.State.Terminated.ExitCode == 0 { + initialized++ + } else { + failedInitialization++ + } + case containerStatus.State.Waiting != nil: + if containerStatus.LastTerminationState.Terminated != nil { + if containerStatus.LastTerminationState.Terminated.ExitCode == 0 { + initialized++ + } else { + failedInitialization++ + } + } else { + pendingInitialization++ + } + default: + pendingInitialization++ + } + } + + unknown := 0 + running := 0 + waiting := 0 + stopped := 0 + failed := 0 + succeeded := 0 + for _, container := range spec.Containers { + containerStatus, ok := api.GetContainerStatus(info, container.Name) + if !ok { + unknown++ + continue + } + + switch { + case containerStatus.State.Running != nil: + running++ + case containerStatus.State.Terminated != nil: + stopped++ + if containerStatus.State.Terminated.ExitCode == 0 { + succeeded++ + } else { + failed++ + } + case containerStatus.State.Waiting != nil: + if containerStatus.LastTerminationState.Terminated != nil { + stopped++ + } else { + waiting++ + } + default: + unknown++ + } + } + + if failedInitialization > 0 && spec.RestartPolicy == api.RestartPolicyNever { + return api.PodFailed + } + + switch { + case pendingInitialization > 0: + fallthrough + case waiting > 0: + glog.V(5).Infof("pod waiting > 0, pending") + // One or more containers has not been started + return api.PodPending + case running > 0 && unknown == 0: + // All containers have been started, and at least + // one container is running + return api.PodRunning + case running == 0 && stopped > 0 && unknown == 0: + // All containers are terminated + if spec.RestartPolicy == api.RestartPolicyAlways { + // All containers are in the process of restarting + return api.PodRunning + } + if stopped == succeeded { + // RestartPolicy is not Always, and all + // containers are terminated in success + return api.PodSucceeded + } + if spec.RestartPolicy == api.RestartPolicyNever { + // RestartPolicy is Never, and all containers are + // terminated with at least one in failure + return api.PodFailed + } + // RestartPolicy is OnFailure, and at least one in failure + // and in the process of restarting + return api.PodRunning + default: + glog.V(5).Infof("pod default case, pending") + return api.PodPending + } +} + +// generateAPIPodStatus creates the final API pod status for a pod, given the +// internal pod status. +func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus { + glog.V(3).Infof("Generating status for %q", format.Pod(pod)) + + // check if an internal module has requested the pod is evicted. + for _, podSyncHandler := range kl.PodSyncHandlers { + if result := podSyncHandler.ShouldEvict(pod); result.Evict { + return api.PodStatus{ + Phase: api.PodFailed, + Reason: result.Reason, + Message: result.Message, + } + } + } + + s := kl.convertStatusToAPIStatus(pod, podStatus) + + // Assume info is ready to process + spec := &pod.Spec + allStatus := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) 
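GetPhase above boils down to a handful of counters (pending initialization, failed initialization, waiting, running, stopped, succeeded, unknown) combined with the restart policy. The following is a compact, self-contained sketch of that decision table, using plain counters instead of api.ContainerStatus values; it is an illustration, not the function the kubelet uses.

package main

import "fmt"

// counts summarizes container states the way GetPhase tallies them.
type counts struct {
	pendingInit, failedInit, waiting, running, stopped, succeeded, unknown int
}

// phase is a simplified rendering of the decision table above.
func phase(c counts, restartPolicy string) string {
	if c.failedInit > 0 && restartPolicy == "Never" {
		return "Failed"
	}
	switch {
	case c.pendingInit > 0 || c.waiting > 0:
		return "Pending"
	case c.running > 0 && c.unknown == 0:
		return "Running"
	case c.running == 0 && c.stopped > 0 && c.unknown == 0:
		if restartPolicy == "Always" {
			return "Running" // all containers are in the process of restarting
		}
		if c.stopped == c.succeeded {
			return "Succeeded"
		}
		if restartPolicy == "Never" {
			return "Failed"
		}
		return "Running" // OnFailure: failed containers will be restarted
	default:
		return "Pending"
	}
}

func main() {
	fmt.Println(phase(counts{running: 2}, "Always"))              // Running
	fmt.Println(phase(counts{stopped: 2, succeeded: 2}, "Never")) // Succeeded
	fmt.Println(phase(counts{stopped: 2, succeeded: 1}, "Never")) // Failed
	fmt.Println(phase(counts{running: 1, waiting: 1}, "Always"))  // Pending
}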
+ s.Phase = GetPhase(spec, allStatus) + kl.probeManager.UpdatePodStatus(pod.UID, s) + s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase)) + s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase)) + // s (the PodStatus we are creating) will not have a PodScheduled condition yet, because convertStatusToAPIStatus() + // does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure + // it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true. + if _, oldPodScheduled := api.GetPodCondition(&pod.Status, api.PodScheduled); oldPodScheduled != nil { + s.Conditions = append(s.Conditions, *oldPodScheduled) + } + api.UpdatePodCondition(&pod.Status, &api.PodCondition{ + Type: api.PodScheduled, + Status: api.ConditionTrue, + }) + + if !kl.standaloneMode { + hostIP, err := kl.getHostIPAnyWay() + if err != nil { + glog.V(4).Infof("Cannot get host IP: %v", err) + } else { + s.HostIP = hostIP.String() + if podUsesHostNetwork(pod) && s.PodIP == "" { + s.PodIP = hostIP.String() + } + } + } + + return *s +} + +// convertStatusToAPIStatus creates an api PodStatus for the given pod from +// the given internal pod status. It is purely transformative and does not +// alter the kubelet state at all. +func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus { + var apiPodStatus api.PodStatus + apiPodStatus.PodIP = podStatus.IP + + apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses( + pod, podStatus, + pod.Status.ContainerStatuses, + pod.Spec.Containers, + len(pod.Spec.InitContainers) > 0, + false, + ) + apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses( + pod, podStatus, + pod.Status.InitContainerStatuses, + pod.Spec.InitContainers, + len(pod.Spec.InitContainers) > 0, + true, + ) + + return &apiPodStatus +} + +// convertToAPIContainerStatuses converts the given internal container +// statuses into API container statuses. +func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubecontainer.PodStatus, previousStatus []api.ContainerStatus, containers []api.Container, hasInitContainers, isInitContainer bool) []api.ContainerStatus { + convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *api.ContainerStatus { + cid := cs.ID.String() + status := &api.ContainerStatus{ + Name: cs.Name, + RestartCount: int32(cs.RestartCount), + Image: cs.Image, + ImageID: cs.ImageID, + ContainerID: cid, + } + switch cs.State { + case kubecontainer.ContainerStateRunning: + status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)} + case kubecontainer.ContainerStateExited: + status.State.Terminated = &api.ContainerStateTerminated{ + ExitCode: int32(cs.ExitCode), + Reason: cs.Reason, + Message: cs.Message, + StartedAt: unversioned.NewTime(cs.StartedAt), + FinishedAt: unversioned.NewTime(cs.FinishedAt), + ContainerID: cid, + } + default: + status.State.Waiting = &api.ContainerStateWaiting{} + } + return status + } + + // Fetch old container statuses from the old pod status.
+ oldStatuses := make(map[string]api.ContainerStatus, len(containers)) + for _, status := range previousStatus { + oldStatuses[status.Name] = status + } + + // Set all container statuses to default waiting state + statuses := make(map[string]*api.ContainerStatus, len(containers)) + defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}} + if hasInitContainers { + defaultWaitingState = api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "PodInitializing"}} + } + + for _, container := range containers { + status := &api.ContainerStatus{ + Name: container.Name, + Image: container.Image, + State: defaultWaitingState, + } + // Apply some values from the old statuses as the default values. + if oldStatus, found := oldStatuses[container.Name]; found { + status.RestartCount = oldStatus.RestartCount + status.LastTerminationState = oldStatus.LastTerminationState + } + statuses[container.Name] = status + } + + // Make the latest container status come first. + sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses))) + // Set container statuses according to the statuses seen in pod status + containerSeen := map[string]int{} + for _, cStatus := range podStatus.ContainerStatuses { + cName := cStatus.Name + if _, ok := statuses[cName]; !ok { + // This would also ignore the infra container. + continue + } + if containerSeen[cName] >= 2 { + continue + } + status := convertContainerStatus(cStatus) + if containerSeen[cName] == 0 { + statuses[cName] = status + } else { + statuses[cName].LastTerminationState = status.State + } + containerSeen[cName] = containerSeen[cName] + 1 + } + + // Handle the containers that failed to start, which should be in the Waiting state. + for _, container := range containers { + if isInitContainer { + // If the init container is terminated with exit code 0, it won't be restarted. + // TODO(random-liu): Handle this in a cleaner way. + s := podStatus.FindContainerStatusByName(container.Name) + if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 { + continue + } + } + // If a container should be restarted in the next syncpod, it is *Waiting*. + if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) { + continue + } + status := statuses[container.Name] + reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name) + if !ok { + // In fact, we could also apply Waiting state here, but it is less informative, + // and the container will be restarted soon, so we prefer the original state here. + // Note that with the current implementation of ShouldContainerBeRestarted the original state here + // could be: + // * Waiting: There is no associated historical container and start failure reason record. + // * Terminated: The container is terminated. + continue + } + if status.State.Terminated != nil { + status.LastTerminationState = status.State + } + status.State = api.ContainerState{ + Waiting: &api.ContainerStateWaiting{ + Reason: reason.Error(), + Message: message, + }, + } + statuses[container.Name] = status + } + + var containerStatuses []api.ContainerStatus + for _, status := range statuses { + containerStatuses = append(containerStatuses, *status) + } + + // Sort the container statuses since clients of this interface expect the list + // of containers in a pod to have a deterministic order.
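Because the runtime may report several dead containers with the same name, the loop above keeps at most the two most recent observations per container: the newest becomes the current state and the second newest becomes the last termination state. A small standalone sketch of that selection, assuming the observations are already sorted newest first (observed and pickStatuses are illustrative names, not kubelet types):

package main

import "fmt"

// observed is one runtime-reported status for a named container.
type observed struct {
	name  string
	state string // e.g. "running", "exited(0)", "exited(1)"
}

type apiStatus struct {
	current         string
	lastTermination string
}

// pickStatuses keeps at most two observations per container from a
// newest-first list: the first becomes the current state, the second the
// last termination state, and anything older is ignored.
func pickStatuses(sorted []observed) map[string]*apiStatus {
	out := map[string]*apiStatus{}
	seen := map[string]int{}
	for _, o := range sorted {
		if seen[o.name] >= 2 {
			continue
		}
		if seen[o.name] == 0 {
			out[o.name] = &apiStatus{current: o.state}
		} else {
			out[o.name].lastTermination = o.state
		}
		seen[o.name]++
	}
	return out
}

func main() {
	statuses := pickStatuses([]observed{ // newest first
		{"app", "running"},
		{"app", "exited(1)"},
		{"app", "exited(0)"}, // third observation for "app" is ignored
		{"sidecar", "exited(0)"},
	})
	fmt.Println(statuses["app"].current, statuses["app"].lastTermination) // running exited(1)
	fmt.Println(statuses["sidecar"].current)                              // exited(0)
}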
+ if isInitContainer { + kubetypes.SortInitContainerStatuses(pod, containerStatuses) + } else { + sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses)) + } + return containerStatuses +} + +// Returns logs of current machine. +func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) { + // TODO: whitelist logs we are willing to serve + kl.logServer.ServeHTTP(w, req) +} + +// findContainer finds and returns the container with the given pod ID, full name, and container name. +// It returns nil if not found. +func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) { + pods, err := kl.containerRuntime.GetPods(false) + if err != nil { + return nil, err + } + pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) + return pod.FindContainerByName(containerName), nil +} + +// Run a command in a container, returns the combined stdout, stderr as an array of bytes +func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) { + podUID = kl.podManager.TranslatePodUID(podUID) + + container, err := kl.findContainer(podFullName, podUID, containerName) + if err != nil { + return nil, err + } + if container == nil { + return nil, fmt.Errorf("container not found (%q)", containerName) + } + + var buffer bytes.Buffer + output := ioutils.WriteCloserWrapper(&buffer) + err = kl.runner.ExecInContainer(container.ID, cmd, nil, output, output, false, nil) + // Even if err is non-nil, there still may be output (e.g. the exec wrote to stdout or stderr but + // the command returned a nonzero exit code). Therefore, always return the output along with the + // error. + return buffer.Bytes(), err +} + +// ExecInContainer executes a command in a container, connecting the supplied +// stdin/stdout/stderr to the command's IO streams. +func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, containerName string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error { + podUID = kl.podManager.TranslatePodUID(podUID) + + container, err := kl.findContainer(podFullName, podUID, containerName) + if err != nil { + return err + } + if container == nil { + return fmt.Errorf("container not found (%q)", containerName) + } + return kl.runner.ExecInContainer(container.ID, cmd, stdin, stdout, stderr, tty, resize) +} + +// AttachContainer uses the container runtime to attach the given streams to +// the given container. +func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error { + podUID = kl.podManager.TranslatePodUID(podUID) + + container, err := kl.findContainer(podFullName, podUID, containerName) + if err != nil { + return err + } + if container == nil { + return fmt.Errorf("container not found (%q)", containerName) + } + return kl.containerRuntime.AttachContainer(container.ID, stdin, stdout, stderr, tty, resize) +} + +// PortForward connects to the pod's port and copies data between the port +// and the stream. 
+func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16, stream io.ReadWriteCloser) error { + podUID = kl.podManager.TranslatePodUID(podUID) + + pods, err := kl.containerRuntime.GetPods(false) + if err != nil { + return err + } + pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) + if pod.IsEmpty() { + return fmt.Errorf("pod not found (%q)", podFullName) + } + return kl.runner.PortForward(&pod, port, stream) +} diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go new file mode 100644 index 0000000000..fa3f288fc2 --- /dev/null +++ b/pkg/kubelet/kubelet_pods_test.go @@ -0,0 +1,1264 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/term" +) + +func TestMakeMounts(t *testing.T) { + container := api.Container{ + VolumeMounts: []api.VolumeMount{ + { + MountPath: "/etc/hosts", + Name: "disk", + ReadOnly: false, + }, + { + MountPath: "/mnt/path3", + Name: "disk", + ReadOnly: true, + }, + { + MountPath: "/mnt/path4", + Name: "disk4", + ReadOnly: false, + }, + { + MountPath: "/mnt/path5", + Name: "disk5", + ReadOnly: false, + }, + }, + } + + podVolumes := kubecontainer.VolumeMap{ + "disk": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/disk"}}, + "disk4": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/host"}}, + "disk5": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/var/lib/kubelet/podID/volumes/empty/disk5"}}, + } + + pod := api.Pod{ + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + }, + } + + mounts, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes) + + expectedMounts := []kubecontainer.Mount{ + { + Name: "disk", + ContainerPath: "/etc/hosts", + HostPath: "/mnt/disk", + ReadOnly: false, + SELinuxRelabel: false, + }, + { + Name: "disk", + ContainerPath: "/mnt/path3", + HostPath: "/mnt/disk", + ReadOnly: true, + SELinuxRelabel: false, + }, + { + Name: "disk4", + ContainerPath: "/mnt/path4", + HostPath: "/mnt/host", + ReadOnly: false, + SELinuxRelabel: false, + }, + { + Name: "disk5", + ContainerPath: "/mnt/path5", + HostPath: "/var/lib/kubelet/podID/volumes/empty/disk5", + ReadOnly: false, + SELinuxRelabel: false, + }, + } + assert.Equal(t, expectedMounts, mounts, "mounts of container %+v", container) +} + +type fakeContainerCommandRunner struct { + // what was passed in + Cmd []string + ID kubecontainer.ContainerID + PodID types.UID + E error + Stdin io.Reader + Stdout io.WriteCloser + Stderr io.WriteCloser + TTY bool + Port uint16 + Stream 
io.ReadWriteCloser + + // what to return + StdoutData string + StderrData string +} + +func (f *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan term.Size) error { + // record params + f.Cmd = cmd + f.ID = id + f.Stdin = in + f.Stdout = out + f.Stderr = err + f.TTY = tty + + // Copy stdout/stderr data + fmt.Fprint(out, f.StdoutData) + fmt.Fprint(out, f.StderrData) + + return f.E +} + +func (f *fakeContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error { + f.PodID = pod.ID + f.Port = port + f.Stream = stream + return nil +} + +func TestRunInContainerNoSuchPod(t *testing.T) { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeRuntime.PodList = []*containertest.FakePod{} + + podName := "podFoo" + podNamespace := "nsFoo" + containerName := "containerFoo" + output, err := kubelet.RunInContainer( + kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), + "", + containerName, + []string{"ls"}) + assert.Error(t, err) + assert.Nil(t, output, "output should be nil") +} + +func TestRunInContainer(t *testing.T) { + for _, testError := range []error{nil, errors.New("foo")} { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeCommandRunner := fakeContainerCommandRunner{ + E: testError, + StdoutData: "foo", + StderrData: "bar", + } + kubelet.runner = &fakeCommandRunner + + containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} + fakeRuntime.PodList = []*containertest.FakePod{ + {Pod: &kubecontainer.Pod{ + ID: "12345678", + Name: "podFoo", + Namespace: "nsFoo", + Containers: []*kubecontainer.Container{ + {Name: "containerFoo", + ID: containerID, + }, + }, + }}, + } + cmd := []string{"ls"} + actualOutput, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd) + assert.Equal(t, containerID, fakeCommandRunner.ID, "(testError=%v) ID", testError) + assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError) + // this isn't 100% foolproof as a bug in a real ContainerCommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test + assert.Equal(t, "foobar", string(actualOutput), "(testError=%v) output", testError) + assert.Equal(t, fmt.Sprintf("%s", err), fmt.Sprintf("%s", testError), "(testError=%v) err", testError) + } +} + +func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + + clusterNS := "203.0.113.1" + kubelet.clusterDomain = "kubernetes.io" + kubelet.clusterDNS = net.ParseIP(clusterNS) + + pods := newTestPods(2) + pods[0].Spec.DNSPolicy = api.DNSClusterFirst + pods[1].Spec.DNSPolicy = api.DNSDefault + + options := make([]*kubecontainer.RunContainerOptions, 2) + for i, pod := range pods { + var err error + options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "") + if err != nil { + t.Fatalf("failed to generate container options: %v", err) + } + } + if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS { + t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS) + } + if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != 
".svc."+kubelet.clusterDomain { + t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch) + } + if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" { + t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS) + } + if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." { + t.Errorf("expected search \".\", got %+v", options[1].DNSSearch) + } + + kubelet.resolverConfig = "/etc/resolv.conf" + for i, pod := range pods { + var err error + options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "") + if err != nil { + t.Fatalf("failed to generate container options: %v", err) + } + } + t.Logf("nameservers %+v", options[1].DNS) + if len(options[0].DNS) != 1 { + t.Errorf("expected cluster nameserver only, got %+v", options[0].DNS) + } else if options[0].DNS[0] != clusterNS { + t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0]) + } + if len(options[0].DNSSearch) != len(options[1].DNSSearch)+3 { + t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch) + } else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain { + t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch) + } +} + +type testServiceLister struct { + services []*api.Service +} + +func (ls testServiceLister) List(labels.Selector) ([]*api.Service, error) { + return ls.services, nil +} + +type envs []kubecontainer.EnvVar + +func (e envs) Len() int { + return len(e) +} + +func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] } + +func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name } + +func buildService(name, namespace, clusterIP, protocol string, port int) *api.Service { + return &api.Service{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{{ + Protocol: api.Protocol(protocol), + Port: int32(port), + }}, + ClusterIP: clusterIP, + }, + } +} + +func TestMakeEnvironmentVariables(t *testing.T) { + services := []*api.Service{ + buildService("kubernetes", api.NamespaceDefault, "1.2.3.1", "TCP", 8081), + buildService("test", "test1", "1.2.3.3", "TCP", 8083), + buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084), + buildService("test", "test2", "1.2.3.5", "TCP", 8085), + buildService("test", "test2", "None", "TCP", 8085), + buildService("test", "test2", "", "TCP", 8085), + buildService("kubernetes", "kubernetes", "1.2.3.6", "TCP", 8086), + buildService("not-special", "kubernetes", "1.2.3.8", "TCP", 8088), + buildService("not-special", "kubernetes", "None", "TCP", 8088), + buildService("not-special", "kubernetes", "", "TCP", 8088), + } + + testCases := []struct { + name string // the name of the test case + ns string // the namespace to generate environment for + container *api.Container // the container to use + masterServiceNs string // the namespace to read master service info from + nilLister bool // whether the lister should be nil + expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars + }{ + { + name: "api server = Y, kubelet = Y", + ns: "test1", + container: &api.Container{ + Env: []api.EnvVar{ + {Name: "FOO", Value: "BAR"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: 
"TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + }, + }, + masterServiceNs: api.NamespaceDefault, + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "BAR"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"}, + }, + }, + { + name: "api server = Y, kubelet = N", + ns: "test1", + container: &api.Container{ + Env: []api.EnvVar{ + {Name: "FOO", Value: "BAR"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + }, + }, + masterServiceNs: api.NamespaceDefault, + nilLister: true, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "BAR"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + }, + }, + { + name: "api server = N; kubelet = Y", + ns: "test1", + container: &api.Container{ + Env: []api.EnvVar{ + {Name: "FOO", Value: "BAZ"}, + }, + }, + masterServiceNs: api.NamespaceDefault, + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "BAZ"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"}, + }, + }, + { + name: "master service in pod ns", + ns: "test2", + container: &api.Container{ + Env: []api.EnvVar{ + {Name: "FOO", Value: "ZAP"}, + }, + }, + masterServiceNs: "kubernetes", + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "ZAP"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"}, + {Name: "TEST_SERVICE_PORT", Value: "8085"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"}, + {Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"}, + {Name: 
"TEST_PORT_8085_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"}, + {Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8084"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"}, + {Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"}, + {Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"}, + {Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"}, + }, + }, + { + name: "pod in master service ns", + ns: "kubernetes", + container: &api.Container{}, + masterServiceNs: "kubernetes", + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"}, + {Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"}, + {Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"}, + {Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"}, + {Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"}, + {Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"}, + {Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8086"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"}, + {Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"}, + {Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"}, + {Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"}, + }, + }, + { + name: "downward api pod", + ns: "downward-api", + container: &api.Container{ + Env: []api.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "POD_NODE_NAME", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: "POD_SERVICE_ACCOUNT_NAME", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "spec.serviceAccountName", + }, + }, + }, + { + Name: "POD_IP", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "status.podIP", + }, + }, + }, + }, + }, + masterServiceNs: "nothing", + nilLister: true, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "POD_NAME", Value: "dapi-test-pod-name"}, + {Name: "POD_NAMESPACE", Value: "downward-api"}, + {Name: "POD_NODE_NAME", Value: "node-name"}, + {Name: "POD_SERVICE_ACCOUNT_NAME", Value: "special"}, + {Name: "POD_IP", Value: "1.2.3.4"}, + }, + }, + { + name: "env expansion", + ns: "test1", + container: &api.Container{ + Env: []api.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "POD_NAME", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "OUT_OF_ORDER_TEST", + Value: "$(OUT_OF_ORDER_TARGET)", + }, + { + Name: "OUT_OF_ORDER_TARGET", + Value: "FOO", + }, + { + Name: "EMPTY_VAR", + 
}, + { + Name: "EMPTY_TEST", + Value: "foo-$(EMPTY_VAR)", + }, + { + Name: "POD_NAME_TEST2", + Value: "test2-$(POD_NAME)", + }, + { + Name: "POD_NAME_TEST3", + Value: "$(POD_NAME_TEST2)-3", + }, + { + Name: "LITERAL_TEST", + Value: "literal-$(TEST_LITERAL)", + }, + { + Name: "SERVICE_VAR_TEST", + Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)", + }, + { + Name: "TEST_UNDEFINED", + Value: "$(UNDEFINED_VAR)", + }, + }, + }, + masterServiceNs: "nothing", + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "POD_NAME", + Value: "dapi-test-pod-name", + }, + { + Name: "POD_NAME_TEST2", + Value: "test2-dapi-test-pod-name", + }, + { + Name: "POD_NAME_TEST3", + Value: "test2-dapi-test-pod-name-3", + }, + { + Name: "LITERAL_TEST", + Value: "literal-test-test-test", + }, + { + Name: "TEST_SERVICE_HOST", + Value: "1.2.3.3", + }, + { + Name: "TEST_SERVICE_PORT", + Value: "8083", + }, + { + Name: "TEST_PORT", + Value: "tcp://1.2.3.3:8083", + }, + { + Name: "TEST_PORT_8083_TCP", + Value: "tcp://1.2.3.3:8083", + }, + { + Name: "TEST_PORT_8083_TCP_PROTO", + Value: "tcp", + }, + { + Name: "TEST_PORT_8083_TCP_PORT", + Value: "8083", + }, + { + Name: "TEST_PORT_8083_TCP_ADDR", + Value: "1.2.3.3", + }, + { + Name: "SERVICE_VAR_TEST", + Value: "1.2.3.3:8083", + }, + { + Name: "OUT_OF_ORDER_TEST", + Value: "$(OUT_OF_ORDER_TARGET)", + }, + { + Name: "OUT_OF_ORDER_TARGET", + Value: "FOO", + }, + { + Name: "TEST_UNDEFINED", + Value: "$(UNDEFINED_VAR)", + }, + { + Name: "EMPTY_VAR", + }, + { + Name: "EMPTY_TEST", + Value: "foo-", + }, + }, + }, + } + + for _, tc := range testCases { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kl := testKubelet.kubelet + kl.masterServiceNamespace = tc.masterServiceNs + if tc.nilLister { + kl.serviceLister = nil + } else { + kl.serviceLister = testServiceLister{services} + } + + testPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: tc.ns, + Name: "dapi-test-pod-name", + }, + Spec: api.PodSpec{ + ServiceAccountName: "special", + NodeName: "node-name", + }, + } + podIP := "1.2.3.4" + + result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP) + assert.NoError(t, err, "[%s]", tc.name) + + sort.Sort(envs(result)) + sort.Sort(envs(tc.expectedEnvs)) + assert.Equal(t, tc.expectedEnvs, result, "[%s] env entries", tc.name) + } +} + +func waitingState(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Waiting: &api.ContainerStateWaiting{}, + }, + } +} +func waitingStateWithLastTermination(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Waiting: &api.ContainerStateWaiting{}, + }, + LastTerminationState: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + } +} +func runningState(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Running: &api.ContainerStateRunning{}, + }, + } +} +func stoppedState(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{}, + }, + } +} +func succeededState(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + } +} +func failedState(cName string) api.ContainerStatus { + return 
api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{ + ExitCode: -1, + }, + }, + } +} + +func TestPodPhaseWithRestartAlways(t *testing.T) { + desiredState := api.PodSpec{ + NodeName: "machine", + Containers: []api.Container{ + {Name: "containerA"}, + {Name: "containerB"}, + }, + RestartPolicy: api.RestartPolicyAlways, + } + + tests := []struct { + pod *api.Pod + status api.PodPhase + test string + }{ + {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + runningState("containerB"), + }, + }, + }, + api.PodRunning, + "all running", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + stoppedState("containerA"), + stoppedState("containerB"), + }, + }, + }, + api.PodRunning, + "all stopped with restart always", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + stoppedState("containerB"), + }, + }, + }, + api.PodRunning, + "mixed state #1 with restart always", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + }, + }, + }, + api.PodPending, + "mixed state #2 with restart always", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingState("containerB"), + }, + }, + }, + api.PodPending, + "mixed state #3 with restart always", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingStateWithLastTermination("containerB"), + }, + }, + }, + api.PodRunning, + "backoff crashloop container with restart always", + }, + } + for _, test := range tests { + status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) + assert.Equal(t, test.status, status, "[test %s]", test.test) + } +} + +func TestPodPhaseWithRestartNever(t *testing.T) { + desiredState := api.PodSpec{ + NodeName: "machine", + Containers: []api.Container{ + {Name: "containerA"}, + {Name: "containerB"}, + }, + RestartPolicy: api.RestartPolicyNever, + } + + tests := []struct { + pod *api.Pod + status api.PodPhase + test string + }{ + {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + runningState("containerB"), + }, + }, + }, + api.PodRunning, + "all running with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + succeededState("containerA"), + succeededState("containerB"), + }, + }, + }, + api.PodSucceeded, + "all succeeded with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + failedState("containerA"), + failedState("containerB"), + }, + }, + }, + api.PodFailed, + "all failed with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + succeededState("containerB"), + }, + }, + }, + api.PodRunning, + "mixed state #1 with restart never", + }, + { + 
&api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + }, + }, + }, + api.PodPending, + "mixed state #2 with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingState("containerB"), + }, + }, + }, + api.PodPending, + "mixed state #3 with restart never", + }, + } + for _, test := range tests { + status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) + assert.Equal(t, test.status, status, "[test %s]", test.test) + } +} + +func TestPodPhaseWithRestartOnFailure(t *testing.T) { + desiredState := api.PodSpec{ + NodeName: "machine", + Containers: []api.Container{ + {Name: "containerA"}, + {Name: "containerB"}, + }, + RestartPolicy: api.RestartPolicyOnFailure, + } + + tests := []struct { + pod *api.Pod + status api.PodPhase + test string + }{ + {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + runningState("containerB"), + }, + }, + }, + api.PodRunning, + "all running with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + succeededState("containerA"), + succeededState("containerB"), + }, + }, + }, + api.PodSucceeded, + "all succeeded with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + failedState("containerA"), + failedState("containerB"), + }, + }, + }, + api.PodRunning, + "all failed with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + succeededState("containerB"), + }, + }, + }, + api.PodRunning, + "mixed state #1 with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + }, + }, + }, + api.PodPending, + "mixed state #2 with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingState("containerB"), + }, + }, + }, + api.PodPending, + "mixed state #3 with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingStateWithLastTermination("containerB"), + }, + }, + }, + api.PodRunning, + "backoff crashloop container with restart onfailure", + }, + } + for _, test := range tests { + status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) + assert.Equal(t, test.status, status, "[test %s]", test.test) + } +} + +func TestExecInContainerNoSuchPod(t *testing.T) { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet.runner = &fakeCommandRunner + fakeRuntime.PodList = []*containertest.FakePod{} + + podName := "podFoo" + podNamespace := "nsFoo" + containerID := "containerFoo" + err := kubelet.ExecInContainer( + kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), + "", + containerID, +
[]string{"ls"}, + nil, + nil, + nil, + false, + nil, + ) + require.Error(t, err) + require.True(t, fakeCommandRunner.ID.IsEmpty(), "Unexpected invocation of runner.ExecInContainer") +} + +func TestExecInContainerNoSuchContainer(t *testing.T) { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "nsFoo" + containerID := "containerFoo" + fakeRuntime.PodList = []*containertest.FakePod{ + {Pod: &kubecontainer.Pod{ + ID: "12345678", + Name: podName, + Namespace: podNamespace, + Containers: []*kubecontainer.Container{ + {Name: "bar", + ID: kubecontainer.ContainerID{Type: "test", ID: "barID"}}, + }, + }}, + } + + err := kubelet.ExecInContainer( + kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: podName, + Namespace: podNamespace, + }}), + "", + containerID, + []string{"ls"}, + nil, + nil, + nil, + false, + nil, + ) + require.Error(t, err) + require.True(t, fakeCommandRunner.ID.IsEmpty(), "Unexpected invocation of runner.ExecInContainer") +} + +type fakeReadWriteCloser struct{} + +func (f *fakeReadWriteCloser) Write(data []byte) (int, error) { + return 0, nil +} + +func (f *fakeReadWriteCloser) Read(data []byte) (int, error) { + return 0, nil +} + +func (f *fakeReadWriteCloser) Close() error { + return nil +} + +func TestExecInContainer(t *testing.T) { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "nsFoo" + containerID := "containerFoo" + command := []string{"ls"} + stdin := &bytes.Buffer{} + stdout := &fakeReadWriteCloser{} + stderr := &fakeReadWriteCloser{} + tty := true + fakeRuntime.PodList = []*containertest.FakePod{ + {Pod: &kubecontainer.Pod{ + ID: "12345678", + Name: podName, + Namespace: podNamespace, + Containers: []*kubecontainer.Container{ + {Name: containerID, + ID: kubecontainer.ContainerID{Type: "test", ID: containerID}, + }, + }, + }}, + } + + err := kubelet.ExecInContainer( + kubecontainer.GetPodFullName(podWithUidNameNs("12345678", podName, podNamespace)), + "", + containerID, + []string{"ls"}, + stdin, + stdout, + stderr, + tty, + nil, + ) + require.NoError(t, err) + require.Equal(t, fakeCommandRunner.ID.ID, containerID, "ID") + require.Equal(t, fakeCommandRunner.Cmd, command, "Command") + require.Equal(t, fakeCommandRunner.Stdin, stdin, "Stdin") + require.Equal(t, fakeCommandRunner.Stdout, stdout, "Stdout") + require.Equal(t, fakeCommandRunner.Stderr, stderr, "Stderr") + require.Equal(t, fakeCommandRunner.TTY, tty, "TTY") +} + +func TestPortForwardNoSuchPod(t *testing.T) { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeRuntime.PodList = []*containertest.FakePod{} + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "nsFoo" + var port uint16 = 5000 + + err := kubelet.PortForward( + kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), + "", + port, + nil, + ) + require.Error(t, err) + require.True(t, fakeCommandRunner.ID.IsEmpty(),
"unexpected invocation of runner.PortForward") +} + +func TestPortForward(t *testing.T) { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + + podName := "podFoo" + podNamespace := "nsFoo" + podID := types.UID("12345678") + fakeRuntime.PodList = []*containertest.FakePod{ + {Pod: &kubecontainer.Pod{ + ID: podID, + Name: podName, + Namespace: podNamespace, + Containers: []*kubecontainer.Container{ + { + Name: "foo", + ID: kubecontainer.ContainerID{Type: "test", ID: "containerFoo"}, + }, + }, + }}, + } + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet.runner = &fakeCommandRunner + + var port uint16 = 5000 + stream := &fakeReadWriteCloser{} + err := kubelet.PortForward( + kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: podName, + Namespace: podNamespace, + }}), + "", + port, + stream, + ) + require.NoError(t, err) + require.Equal(t, fakeCommandRunner.PodID, podID, "Pod ID") + require.Equal(t, fakeCommandRunner.Port, port, "Port") + require.Equal(t, fakeCommandRunner.Stream, stream, "stream") +} + +// Tests that host port conflicts are detected correctly. +func TestGetHostPortConflicts(t *testing.T) { + pods := []*api.Pod{ + {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, + {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}}, + {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}}, + {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 83}}}}}}, + } + // Pods should not cause any conflict. + assert.False(t, hasHostPortConflicts(pods), "Should not have port conflicts") + + expected := &api.Pod{ + Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}, + } + // The new pod should cause conflict and be reported. + pods = append(pods, expected) + assert.True(t, hasHostPortConflicts(pods), "Should have port conflicts") +} diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 9ca42a117e..28373eb5ca 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -17,12 +17,8 @@ limitations under the License.
package kubelet import ( - "bytes" - "errors" "fmt" - "io" "io/ioutil" - "net" "os" "sort" "testing" @@ -34,13 +30,11 @@ import ( "github.com/stretchr/testify/require" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/record" - "k8s.io/kubernetes/pkg/client/testing/core" cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/config" @@ -61,15 +55,12 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/queue" kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/mount" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/term" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/volume" _ "k8s.io/kubernetes/pkg/volume/host_path" @@ -378,532 +369,6 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) { fakeRuntime.AssertKilledPods([]string{"12345678"}) } -func TestVolumeAttachAndMountControllerDisabled(t *testing.T) { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - - pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ - Volumes: []api.Volume{ - { - Name: "vol1", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ - PDName: "fake-device", - }, - }, - }, - }, - }) - - stopCh := runVolumeManager(kubelet) - defer func() { - close(stopCh) - }() - - kubelet.podManager.SetPods([]*api.Pod{pod}) - err := kubelet.volumeManager.WaitForAttachAndMount(pod) - assert.NoError(t, err) - - podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) - - expectedPodVolumes := []string{"vol1"} - assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) - for _, name := range expectedPodVolumes { - assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod) - } - assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") - assert.NoError(t, volumetest.VerifyWaitForAttachCallCount( - 1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifyAttachCallCount( - 1 /* expectedAttachCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifyMountDeviceCallCount( - 1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifySetUpCallCount( - 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) -} - -func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - - pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ - Volumes: []api.Volume{ - { - Name: "vol1", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ - PDName: "fake-device", - }, - }, - }, - }, - }) - - stopCh := runVolumeManager(kubelet) - defer func() { - close(stopCh) 
- }() - - // Add pod - kubelet.podManager.SetPods([]*api.Pod{pod}) - - // Verify volumes attached - err := kubelet.volumeManager.WaitForAttachAndMount(pod) - assert.NoError(t, err) - - podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) - - expectedPodVolumes := []string{"vol1"} - assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) - for _, name := range expectedPodVolumes { - assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod) - } - - assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") - assert.NoError(t, volumetest.VerifyWaitForAttachCallCount( - 1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifyAttachCallCount( - 1 /* expectedAttachCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifyMountDeviceCallCount( - 1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifySetUpCallCount( - 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) - - // Remove pod - kubelet.podManager.SetPods([]*api.Pod{}) - - assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod)) - - // Verify volumes unmounted - podVolumes = kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) - - assert.Len(t, podVolumes, 0, - "Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes) - - assert.NoError(t, volumetest.VerifyTearDownCallCount( - 1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)) - - // Verify volumes detached and no longer reported as in use - assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) - assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") - assert.NoError(t, volumetest.VerifyDetachCallCount( - 1 /* expectedDetachCallCount */, testKubelet.volumePlugin)) -} - -func TestVolumeAttachAndMountControllerEnabled(t *testing.T) { - testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - kubeClient := testKubelet.fakeKubeClient - kubeClient.AddReactor("get", "nodes", - func(action core.Action) (bool, runtime.Object, error) { - return true, &api.Node{ - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Status: api.NodeStatus{ - VolumesAttached: []api.AttachedVolume{ - { - Name: "fake/vol1", - DevicePath: "fake/path", - }, - }}, - Spec: api.NodeSpec{ExternalID: testKubeletHostname}, - }, nil - }) - kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, fmt.Errorf("no reaction implemented for %s", action) - }) - - pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ - Volumes: []api.Volume{ - { - Name: "vol1", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ - PDName: "fake-device", - }, - }, - }, - }, - }) - - stopCh := runVolumeManager(kubelet) - defer func() { - close(stopCh) - }() - - kubelet.podManager.SetPods([]*api.Pod{pod}) - - // Fake node status update - go simulateVolumeInUseUpdate( - api.UniqueVolumeName("fake/vol1"), - stopCh, - kubelet.volumeManager) - - assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod)) - - podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) - - 
expectedPodVolumes := []string{"vol1"} - assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) - for _, name := range expectedPodVolumes { - assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod) - } - assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") - assert.NoError(t, volumetest.VerifyWaitForAttachCallCount( - 1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifyMountDeviceCallCount( - 1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifySetUpCallCount( - 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) -} - -func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { - testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - kubeClient := testKubelet.fakeKubeClient - kubeClient.AddReactor("get", "nodes", - func(action core.Action) (bool, runtime.Object, error) { - return true, &api.Node{ - ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, - Status: api.NodeStatus{ - VolumesAttached: []api.AttachedVolume{ - { - Name: "fake/vol1", - DevicePath: "fake/path", - }, - }}, - Spec: api.NodeSpec{ExternalID: testKubeletHostname}, - }, nil - }) - kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, fmt.Errorf("no reaction implemented for %s", action) - }) - - pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ - Volumes: []api.Volume{ - { - Name: "vol1", - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ - PDName: "fake-device", - }, - }, - }, - }, - }) - - stopCh := runVolumeManager(kubelet) - defer func() { - close(stopCh) - }() - - // Add pod - kubelet.podManager.SetPods([]*api.Pod{pod}) - - // Fake node status update - go simulateVolumeInUseUpdate( - api.UniqueVolumeName("fake/vol1"), - stopCh, - kubelet.volumeManager) - - // Verify volumes attached - assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod)) - - podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) - - expectedPodVolumes := []string{"vol1"} - assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) - for _, name := range expectedPodVolumes { - assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod) - } - - assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") - assert.NoError(t, volumetest.VerifyWaitForAttachCallCount( - 1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifyMountDeviceCallCount( - 1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)) - assert.NoError(t, volumetest.VerifySetUpCallCount( - 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) - - // Remove pod - kubelet.podManager.SetPods([]*api.Pod{}) - - assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod)) - - // Verify volumes unmounted - podVolumes = kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) - - assert.Len(t, podVolumes, 0, - "Expected volumes to be unmounted and detached. 
But some volumes are still mounted: %#v", podVolumes) - - assert.NoError(t, volumetest.VerifyTearDownCallCount( - 1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)) - - // Verify volumes detached and no longer reported as in use - assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) - assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") - assert.NoError(t, volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin)) -} - -type stubVolume struct { - path string - volume.MetricsNil -} - -func (f *stubVolume) GetPath() string { - return f.path -} - -func (f *stubVolume) GetAttributes() volume.Attributes { - return volume.Attributes{} -} - -func (f *stubVolume) SetUp(fsGroup *int64) error { - return nil -} - -func (f *stubVolume) SetUpAt(dir string, fsGroup *int64) error { - return nil -} - -func TestMakeVolumeMounts(t *testing.T) { - container := api.Container{ - VolumeMounts: []api.VolumeMount{ - { - MountPath: "/etc/hosts", - Name: "disk", - ReadOnly: false, - }, - { - MountPath: "/mnt/path3", - Name: "disk", - ReadOnly: true, - }, - { - MountPath: "/mnt/path4", - Name: "disk4", - ReadOnly: false, - }, - { - MountPath: "/mnt/path5", - Name: "disk5", - ReadOnly: false, - }, - }, - } - - podVolumes := kubecontainer.VolumeMap{ - "disk": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/disk"}}, - "disk4": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/host"}}, - "disk5": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/var/lib/kubelet/podID/volumes/empty/disk5"}}, - } - - pod := api.Pod{ - Spec: api.PodSpec{ - SecurityContext: &api.PodSecurityContext{ - HostNetwork: true, - }, - }, - } - - mounts, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes) - - expectedMounts := []kubecontainer.Mount{ - { - Name: "disk", - ContainerPath: "/etc/hosts", - HostPath: "/mnt/disk", - ReadOnly: false, - SELinuxRelabel: false, - }, - { - Name: "disk", - ContainerPath: "/mnt/path3", - HostPath: "/mnt/disk", - ReadOnly: true, - SELinuxRelabel: false, - }, - { - Name: "disk4", - ContainerPath: "/mnt/path4", - HostPath: "/mnt/host", - ReadOnly: false, - SELinuxRelabel: false, - }, - { - Name: "disk5", - ContainerPath: "/mnt/path5", - HostPath: "/var/lib/kubelet/podID/volumes/empty/disk5", - ReadOnly: false, - SELinuxRelabel: false, - }, - } - assert.Equal(t, expectedMounts, mounts, "mounts of container %+v", container) -} - -type fakeContainerCommandRunner struct { - // what was passed in - Cmd []string - ID kubecontainer.ContainerID - PodID types.UID - E error - Stdin io.Reader - Stdout io.WriteCloser - Stderr io.WriteCloser - TTY bool - Port uint16 - Stream io.ReadWriteCloser - - // what to return - StdoutData string - StderrData string -} - -func (f *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan term.Size) error { - // record params - f.Cmd = cmd - f.ID = id - f.Stdin = in - f.Stdout = out - f.Stderr = err - f.TTY = tty - - // Copy stdout/stderr data - fmt.Fprint(out, f.StdoutData) - fmt.Fprint(out, f.StderrData) - - return f.E -} - -func (f *fakeContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error { - f.PodID = pod.ID - f.Port = port - f.Stream = stream - return nil -} - -func TestRunInContainerNoSuchPod(t *testing.T) { - testKubelet := newTestKubelet(t, false /* 
controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - fakeRuntime := testKubelet.fakeRuntime - fakeRuntime.PodList = []*containertest.FakePod{} - - podName := "podFoo" - podNamespace := "nsFoo" - containerName := "containerFoo" - output, err := kubelet.RunInContainer( - kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), - "", - containerName, - []string{"ls"}) - assert.Error(t, err) - assert.Nil(t, output, "output should be nil") -} - -func TestRunInContainer(t *testing.T) { - for _, testError := range []error{nil, errors.New("foo")} { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - fakeRuntime := testKubelet.fakeRuntime - fakeCommandRunner := fakeContainerCommandRunner{ - E: testError, - StdoutData: "foo", - StderrData: "bar", - } - kubelet.runner = &fakeCommandRunner - - containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} - fakeRuntime.PodList = []*containertest.FakePod{ - {Pod: &kubecontainer.Pod{ - ID: "12345678", - Name: "podFoo", - Namespace: "nsFoo", - Containers: []*kubecontainer.Container{ - {Name: "containerFoo", - ID: containerID, - }, - }, - }}, - } - cmd := []string{"ls"} - actualOutput, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd) - assert.Equal(t, containerID, fakeCommandRunner.ID, "(testError=%v) ID", testError) - assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError) - // this isn't 100% foolproof as a bug in a real ContainerCommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test - assert.Equal(t, "foobar", string(actualOutput), "(testError=%v) output", testError) - assert.Equal(t, fmt.Sprintf("%s", err), fmt.Sprintf("%s", testError), "(testError=%v) err", testError) - } -} - -func TestDNSConfigurationParams(t *testing.T) { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - - clusterNS := "203.0.113.1" - kubelet.clusterDomain = "kubernetes.io" - kubelet.clusterDNS = net.ParseIP(clusterNS) - - pods := newTestPods(2) - pods[0].Spec.DNSPolicy = api.DNSClusterFirst - pods[1].Spec.DNSPolicy = api.DNSDefault - - options := make([]*kubecontainer.RunContainerOptions, 2) - for i, pod := range pods { - var err error - options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "") - if err != nil { - t.Fatalf("failed to generate container options: %v", err) - } - } - if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS { - t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS) - } - if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain { - t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch) - } - if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" { - t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS) - } - if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." 
{ - t.Errorf("expected search \".\", got %+v", options[1].DNSSearch) - } - - kubelet.resolverConfig = "/etc/resolv.conf" - for i, pod := range pods { - var err error - options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "") - if err != nil { - t.Fatalf("failed to generate container options: %v", err) - } - } - t.Logf("nameservers %+v", options[1].DNS) - if len(options[0].DNS) != 1 { - t.Errorf("expected cluster nameserver only, got %+v", options[0].DNS) - } else if options[0].DNS[0] != clusterNS { - t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0]) - } - if len(options[0].DNSSearch) != len(options[1].DNSSearch)+3 { - t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch) - } else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain { - t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch) - } -} - -type testServiceLister struct { - services []*api.Service -} - -func (ls testServiceLister) List(labels.Selector) ([]*api.Service, error) { - return ls.services, nil -} - type testNodeLister struct { nodes []api.Node } @@ -927,1002 +392,6 @@ func (ls testNodeLister) List() (api.NodeList, error) { }, nil } -type envs []kubecontainer.EnvVar - -func (e envs) Len() int { - return len(e) -} - -func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] } - -func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name } - -func buildService(name, namespace, clusterIP, protocol string, port int) *api.Service { - return &api.Service{ - ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace}, - Spec: api.ServiceSpec{ - Ports: []api.ServicePort{{ - Protocol: api.Protocol(protocol), - Port: int32(port), - }}, - ClusterIP: clusterIP, - }, - } -} - -func TestMakeEnvironmentVariables(t *testing.T) { - services := []*api.Service{ - buildService("kubernetes", api.NamespaceDefault, "1.2.3.1", "TCP", 8081), - buildService("test", "test1", "1.2.3.3", "TCP", 8083), - buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084), - buildService("test", "test2", "1.2.3.5", "TCP", 8085), - buildService("test", "test2", "None", "TCP", 8085), - buildService("test", "test2", "", "TCP", 8085), - buildService("kubernetes", "kubernetes", "1.2.3.6", "TCP", 8086), - buildService("not-special", "kubernetes", "1.2.3.8", "TCP", 8088), - buildService("not-special", "kubernetes", "None", "TCP", 8088), - buildService("not-special", "kubernetes", "", "TCP", 8088), - } - - testCases := []struct { - name string // the name of the test case - ns string // the namespace to generate environment for - container *api.Container // the container to use - masterServiceNs string // the namespace to read master service info from - nilLister bool // whether the lister should be nil - expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars - }{ - { - name: "api server = Y, kubelet = Y", - ns: "test1", - container: &api.Container{ - Env: []api.EnvVar{ - {Name: "FOO", Value: "BAR"}, - {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, - {Name: "TEST_SERVICE_PORT", Value: "8083"}, - {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, - {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, - {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, - }, - }, - masterServiceNs: api.NamespaceDefault, - nilLister: false, - expectedEnvs: []kubecontainer.EnvVar{ - {Name: "FOO", Value: "BAR"}, - {Name: "TEST_SERVICE_HOST", 
Value: "1.2.3.3"}, - {Name: "TEST_SERVICE_PORT", Value: "8083"}, - {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, - {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, - {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, - {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"}, - {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"}, - {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"}, - {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"}, - {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"}, - {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"}, - {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"}, - }, - }, - { - name: "api server = Y, kubelet = N", - ns: "test1", - container: &api.Container{ - Env: []api.EnvVar{ - {Name: "FOO", Value: "BAR"}, - {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, - {Name: "TEST_SERVICE_PORT", Value: "8083"}, - {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, - {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, - {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, - }, - }, - masterServiceNs: api.NamespaceDefault, - nilLister: true, - expectedEnvs: []kubecontainer.EnvVar{ - {Name: "FOO", Value: "BAR"}, - {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, - {Name: "TEST_SERVICE_PORT", Value: "8083"}, - {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, - {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, - {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, - }, - }, - { - name: "api server = N; kubelet = Y", - ns: "test1", - container: &api.Container{ - Env: []api.EnvVar{ - {Name: "FOO", Value: "BAZ"}, - }, - }, - masterServiceNs: api.NamespaceDefault, - nilLister: false, - expectedEnvs: []kubecontainer.EnvVar{ - {Name: "FOO", Value: "BAZ"}, - {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, - {Name: "TEST_SERVICE_PORT", Value: "8083"}, - {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, - {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, - {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, - {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, - {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"}, - {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"}, - {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"}, - {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"}, - {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"}, - {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"}, - {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"}, - }, - }, - { - name: "master service in pod ns", - ns: "test2", - container: &api.Container{ - Env: []api.EnvVar{ - {Name: "FOO", Value: "ZAP"}, - }, - }, - masterServiceNs: "kubernetes", - nilLister: false, - expectedEnvs: []kubecontainer.EnvVar{ - {Name: "FOO", Value: "ZAP"}, - {Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"}, - {Name: "TEST_SERVICE_PORT", Value: "8085"}, - {Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"}, - {Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"}, - {Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"}, - {Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"}, - {Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"}, - {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"}, - {Name: 
"KUBERNETES_SERVICE_PORT", Value: "8084"}, - {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"}, - {Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"}, - {Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"}, - {Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"}, - {Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"}, - }, - }, - { - name: "pod in master service ns", - ns: "kubernetes", - container: &api.Container{}, - masterServiceNs: "kubernetes", - nilLister: false, - expectedEnvs: []kubecontainer.EnvVar{ - {Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"}, - {Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"}, - {Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"}, - {Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"}, - {Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"}, - {Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"}, - {Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"}, - {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"}, - {Name: "KUBERNETES_SERVICE_PORT", Value: "8086"}, - {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"}, - {Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"}, - {Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"}, - {Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"}, - {Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"}, - }, - }, - { - name: "downward api pod", - ns: "downward-api", - container: &api.Container{ - Env: []api.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Default.GroupVersion().String(), - FieldPath: "metadata.name", - }, - }, - }, - { - Name: "POD_NAMESPACE", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Default.GroupVersion().String(), - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "POD_NODE_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Default.GroupVersion().String(), - FieldPath: "spec.nodeName", - }, - }, - }, - { - Name: "POD_SERVICE_ACCOUNT_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Default.GroupVersion().String(), - FieldPath: "spec.serviceAccountName", - }, - }, - }, - { - Name: "POD_IP", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Default.GroupVersion().String(), - FieldPath: "status.podIP", - }, - }, - }, - }, - }, - masterServiceNs: "nothing", - nilLister: true, - expectedEnvs: []kubecontainer.EnvVar{ - {Name: "POD_NAME", Value: "dapi-test-pod-name"}, - {Name: "POD_NAMESPACE", Value: "downward-api"}, - {Name: "POD_NODE_NAME", Value: "node-name"}, - {Name: "POD_SERVICE_ACCOUNT_NAME", Value: "special"}, - {Name: "POD_IP", Value: "1.2.3.4"}, - }, - }, - { - name: "env expansion", - ns: "test1", - container: &api.Container{ - Env: []api.EnvVar{ - { - Name: "TEST_LITERAL", - Value: "test-test-test", - }, - { - Name: "POD_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Default.GroupVersion().String(), - FieldPath: "metadata.name", - }, - }, - }, - { - Name: "OUT_OF_ORDER_TEST", - Value: "$(OUT_OF_ORDER_TARGET)", - }, - { - Name: "OUT_OF_ORDER_TARGET", - Value: "FOO", - }, - { - Name: "EMPTY_VAR", - }, - { - Name: "EMPTY_TEST", - Value: "foo-$(EMPTY_VAR)", - }, - { - Name: "POD_NAME_TEST2", - Value: "test2-$(POD_NAME)", - }, - { - Name: "POD_NAME_TEST3", - Value: "$(POD_NAME_TEST2)-3", - }, - { - Name: 
"LITERAL_TEST", - Value: "literal-$(TEST_LITERAL)", - }, - { - Name: "SERVICE_VAR_TEST", - Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)", - }, - { - Name: "TEST_UNDEFINED", - Value: "$(UNDEFINED_VAR)", - }, - }, - }, - masterServiceNs: "nothing", - nilLister: false, - expectedEnvs: []kubecontainer.EnvVar{ - { - Name: "TEST_LITERAL", - Value: "test-test-test", - }, - { - Name: "POD_NAME", - Value: "dapi-test-pod-name", - }, - { - Name: "POD_NAME_TEST2", - Value: "test2-dapi-test-pod-name", - }, - { - Name: "POD_NAME_TEST3", - Value: "test2-dapi-test-pod-name-3", - }, - { - Name: "LITERAL_TEST", - Value: "literal-test-test-test", - }, - { - Name: "TEST_SERVICE_HOST", - Value: "1.2.3.3", - }, - { - Name: "TEST_SERVICE_PORT", - Value: "8083", - }, - { - Name: "TEST_PORT", - Value: "tcp://1.2.3.3:8083", - }, - { - Name: "TEST_PORT_8083_TCP", - Value: "tcp://1.2.3.3:8083", - }, - { - Name: "TEST_PORT_8083_TCP_PROTO", - Value: "tcp", - }, - { - Name: "TEST_PORT_8083_TCP_PORT", - Value: "8083", - }, - { - Name: "TEST_PORT_8083_TCP_ADDR", - Value: "1.2.3.3", - }, - { - Name: "SERVICE_VAR_TEST", - Value: "1.2.3.3:8083", - }, - { - Name: "OUT_OF_ORDER_TEST", - Value: "$(OUT_OF_ORDER_TARGET)", - }, - { - Name: "OUT_OF_ORDER_TARGET", - Value: "FOO", - }, - { - Name: "TEST_UNDEFINED", - Value: "$(UNDEFINED_VAR)", - }, - { - Name: "EMPTY_VAR", - }, - { - Name: "EMPTY_TEST", - Value: "foo-", - }, - }, - }, - } - - for _, tc := range testCases { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kl := testKubelet.kubelet - kl.masterServiceNamespace = tc.masterServiceNs - if tc.nilLister { - kl.serviceLister = nil - } else { - kl.serviceLister = testServiceLister{services} - } - - testPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ - Namespace: tc.ns, - Name: "dapi-test-pod-name", - }, - Spec: api.PodSpec{ - ServiceAccountName: "special", - NodeName: "node-name", - }, - } - podIP := "1.2.3.4" - - result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP) - assert.NoError(t, err, "[%s]", tc.name) - - sort.Sort(envs(result)) - sort.Sort(envs(tc.expectedEnvs)) - assert.Equal(t, tc.expectedEnvs, result, "[%s] env entries", tc.name) - } -} - -func waitingState(cName string) api.ContainerStatus { - return api.ContainerStatus{ - Name: cName, - State: api.ContainerState{ - Waiting: &api.ContainerStateWaiting{}, - }, - } -} -func waitingStateWithLastTermination(cName string) api.ContainerStatus { - return api.ContainerStatus{ - Name: cName, - State: api.ContainerState{ - Waiting: &api.ContainerStateWaiting{}, - }, - LastTerminationState: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - } -} -func runningState(cName string) api.ContainerStatus { - return api.ContainerStatus{ - Name: cName, - State: api.ContainerState{ - Running: &api.ContainerStateRunning{}, - }, - } -} -func stoppedState(cName string) api.ContainerStatus { - return api.ContainerStatus{ - Name: cName, - State: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{}, - }, - } -} -func succeededState(cName string) api.ContainerStatus { - return api.ContainerStatus{ - Name: cName, - State: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - } -} -func failedState(cName string) api.ContainerStatus { - return api.ContainerStatus{ - Name: cName, - State: api.ContainerState{ - Terminated: &api.ContainerStateTerminated{ - ExitCode: -1, - }, - }, - } -} - -func TestPodPhaseWithRestartAlways(t *testing.T) { - desiredState 
:= api.PodSpec{ - NodeName: "machine", - Containers: []api.Container{ - {Name: "containerA"}, - {Name: "containerB"}, - }, - RestartPolicy: api.RestartPolicyAlways, - } - - tests := []struct { - pod *api.Pod - status api.PodPhase - test string - }{ - {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - runningState("containerB"), - }, - }, - }, - api.PodRunning, - "all running", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - stoppedState("containerA"), - stoppedState("containerB"), - }, - }, - }, - api.PodRunning, - "all stopped with restart always", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - stoppedState("containerB"), - }, - }, - }, - api.PodRunning, - "mixed state #1 with restart always", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - }, - }, - }, - api.PodPending, - "mixed state #2 with restart always", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - waitingState("containerB"), - }, - }, - }, - api.PodPending, - "mixed state #3 with restart always", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - waitingStateWithLastTermination("containerB"), - }, - }, - }, - api.PodRunning, - "backoff crashloop container with restart always", - }, - } - for _, test := range tests { - status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) - assert.Equal(t, test.status, status, "[test %s]", test.test) - } -} - -func TestPodPhaseWithRestartNever(t *testing.T) { - desiredState := api.PodSpec{ - NodeName: "machine", - Containers: []api.Container{ - {Name: "containerA"}, - {Name: "containerB"}, - }, - RestartPolicy: api.RestartPolicyNever, - } - - tests := []struct { - pod *api.Pod - status api.PodPhase - test string - }{ - {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - runningState("containerB"), - }, - }, - }, - api.PodRunning, - "all running with restart never", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - succeededState("containerA"), - succeededState("containerB"), - }, - }, - }, - api.PodSucceeded, - "all succeeded with restart never", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - failedState("containerA"), - failedState("containerB"), - }, - }, - }, - api.PodFailed, - "all failed with restart never", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - succeededState("containerB"), - }, - }, - }, - api.PodRunning, - "mixed state #1 with restart never", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - }, - }, - }, - api.PodPending, - "mixed state #2 with restart never", - }, - { - 
&api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - waitingState("containerB"), - }, - }, - }, - api.PodPending, - "mixed state #3 with restart never", - }, - } - for _, test := range tests { - status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) - assert.Equal(t, test.status, status, "[test %s]", test.test) - } -} - -func TestPodPhaseWithRestartOnFailure(t *testing.T) { - desiredState := api.PodSpec{ - NodeName: "machine", - Containers: []api.Container{ - {Name: "containerA"}, - {Name: "containerB"}, - }, - RestartPolicy: api.RestartPolicyOnFailure, - } - - tests := []struct { - pod *api.Pod - status api.PodPhase - test string - }{ - {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - runningState("containerB"), - }, - }, - }, - api.PodRunning, - "all running with restart onfailure", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - succeededState("containerA"), - succeededState("containerB"), - }, - }, - }, - api.PodSucceeded, - "all succeeded with restart onfailure", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - failedState("containerA"), - failedState("containerB"), - }, - }, - }, - api.PodRunning, - "all failed with restart never", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - succeededState("containerB"), - }, - }, - }, - api.PodRunning, - "mixed state #1 with restart onfailure", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - }, - }, - }, - api.PodPending, - "mixed state #2 with restart onfailure", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - waitingState("containerB"), - }, - }, - }, - api.PodPending, - "mixed state #3 with restart onfailure", - }, - { - &api.Pod{ - Spec: desiredState, - Status: api.PodStatus{ - ContainerStatuses: []api.ContainerStatus{ - runningState("containerA"), - waitingStateWithLastTermination("containerB"), - }, - }, - }, - api.PodRunning, - "backoff crashloop container with restart onfailure", - }, - } - for _, test := range tests { - status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) - assert.Equal(t, test.status, status, "[test %s]", test.test) - } -} - -func TestExecInContainerNoSuchPod(t *testing.T) { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - fakeRuntime := testKubelet.fakeRuntime - fakeCommandRunner := fakeContainerCommandRunner{} - kubelet.runner = &fakeCommandRunner - fakeRuntime.PodList = []*containertest.FakePod{} - - podName := "podFoo" - podNamespace := "nsFoo" - containerID := "containerFoo" - err := kubelet.ExecInContainer( - kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), - "", - containerID, - []string{"ls"}, - nil, - nil, - nil, - false, - nil, - ) - require.Error(t, err) - require.True(t, fakeCommandRunner.ID.IsEmpty(), "Unexpected invocation of runner.ExecInContainer") -} - -func 
TestExecInContainerNoSuchContainer(t *testing.T) { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - fakeRuntime := testKubelet.fakeRuntime - fakeCommandRunner := fakeContainerCommandRunner{} - kubelet.runner = &fakeCommandRunner - - podName := "podFoo" - podNamespace := "nsFoo" - containerID := "containerFoo" - fakeRuntime.PodList = []*containertest.FakePod{ - {Pod: &kubecontainer.Pod{ - ID: "12345678", - Name: podName, - Namespace: podNamespace, - Containers: []*kubecontainer.Container{ - {Name: "bar", - ID: kubecontainer.ContainerID{Type: "test", ID: "barID"}}, - }, - }}, - } - - err := kubelet.ExecInContainer( - kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{ - UID: "12345678", - Name: podName, - Namespace: podNamespace, - }}), - "", - containerID, - []string{"ls"}, - nil, - nil, - nil, - false, - nil, - ) - require.Error(t, err) - require.True(t, fakeCommandRunner.ID.IsEmpty(), "Unexpected invocation of runner.ExecInContainer") -} - -type fakeReadWriteCloser struct{} - -func (f *fakeReadWriteCloser) Write(data []byte) (int, error) { - return 0, nil -} - -func (f *fakeReadWriteCloser) Read(data []byte) (int, error) { - return 0, nil -} - -func (f *fakeReadWriteCloser) Close() error { - return nil -} - -func TestExecInContainer(t *testing.T) { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - fakeRuntime := testKubelet.fakeRuntime - fakeCommandRunner := fakeContainerCommandRunner{} - kubelet.runner = &fakeCommandRunner - - podName := "podFoo" - podNamespace := "nsFoo" - containerID := "containerFoo" - command := []string{"ls"} - stdin := &bytes.Buffer{} - stdout := &fakeReadWriteCloser{} - stderr := &fakeReadWriteCloser{} - tty := true - fakeRuntime.PodList = []*containertest.FakePod{ - {Pod: &kubecontainer.Pod{ - ID: "12345678", - Name: podName, - Namespace: podNamespace, - Containers: []*kubecontainer.Container{ - {Name: containerID, - ID: kubecontainer.ContainerID{Type: "test", ID: containerID}, - }, - }, - }}, - } - - err := kubelet.ExecInContainer( - kubecontainer.GetPodFullName(podWithUidNameNs("12345678", podName, podNamespace)), - "", - containerID, - []string{"ls"}, - stdin, - stdout, - stderr, - tty, - nil, - ) - require.NoError(t, err) - require.Equal(t, fakeCommandRunner.ID.ID, containerID, "ID") - require.Equal(t, fakeCommandRunner.Cmd, command, "Command") - require.Equal(t, fakeCommandRunner.Stdin, stdin, "Stdin") - require.Equal(t, fakeCommandRunner.Stdout, stdout, "Stdout") - require.Equal(t, fakeCommandRunner.Stderr, stderr, "Stderr") - require.Equal(t, fakeCommandRunner.TTY, tty, "TTY") -} - -func TestPortForwardNoSuchPod(t *testing.T) { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kubelet := testKubelet.kubelet - fakeRuntime := testKubelet.fakeRuntime - fakeRuntime.PodList = []*containertest.FakePod{} - fakeCommandRunner := fakeContainerCommandRunner{} - kubelet.runner = &fakeCommandRunner - - podName := "podFoo" - podNamespace := "nsFoo" - var port uint16 = 5000 - - err := kubelet.PortForward( - kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), - "", - port, - nil, - ) - require.Error(t, err) - require.True(t, fakeCommandRunner.ID.IsEmpty(), "unexpected invocation of runner.PortForward") -} - -func TestPortForward(t *testing.T) { - testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) - kubelet := 
testKubelet.kubelet - fakeRuntime := testKubelet.fakeRuntime - - podName := "podFoo" - podNamespace := "nsFoo" - podID := types.UID("12345678") - fakeRuntime.PodList = []*containertest.FakePod{ - {Pod: &kubecontainer.Pod{ - ID: podID, - Name: podName, - Namespace: podNamespace, - Containers: []*kubecontainer.Container{ - { - Name: "foo", - ID: kubecontainer.ContainerID{Type: "test", ID: "containerFoo"}, - }, - }, - }}, - } - fakeCommandRunner := fakeContainerCommandRunner{} - kubelet.runner = &fakeCommandRunner - - var port uint16 = 5000 - stream := &fakeReadWriteCloser{} - err := kubelet.PortForward( - kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{ - UID: "12345678", - Name: podName, - Namespace: podNamespace, - }}), - "", - port, - stream, - ) - require.NoError(t, err) - require.Equal(t, fakeCommandRunner.PodID, podID, "Pod ID") - require.Equal(t, fakeCommandRunner.Port, port, "Port") - require.Equal(t, fakeCommandRunner.Stream, stream, "stream") -} - -// Tests that identify the host port conflicts are detected correctly. -func TestGetHostPortConflicts(t *testing.T) { - pods := []*api.Pod{ - {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, - {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}}, - {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}}, - {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 83}}}}}}, - } - // Pods should not cause any conflict. - assert.False(t, hasHostPortConflicts(pods), "Should not have port conflicts") - - expected := &api.Pod{ - Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}, - } - // The new pod should cause conflict and be reported. - pods = append(pods, expected) - assert.True(t, hasHostPortConflicts(pods), "Should have port conflicts") -} - // Tests that we handle port conflicts correctly by setting the failed status in status map. func TestHandlePortConflicts(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) diff --git a/pkg/kubelet/kubelet_volumes_test.go b/pkg/kubelet/kubelet_volumes_test.go index 3c0f6cd2e7..a36dce4f89 100644 --- a/pkg/kubelet/kubelet_volumes_test.go +++ b/pkg/kubelet/kubelet_volumes_test.go @@ -17,9 +17,16 @@ limitations under the License. 
package kubelet import ( + "fmt" "testing" + "github.com/stretchr/testify/assert" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/volume" + volumetest "k8s.io/kubernetes/pkg/volume/testing" + "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) func TestPodVolumesExist(t *testing.T) { @@ -105,3 +112,298 @@ func TestPodVolumesExist(t *testing.T) { } } } + +func TestVolumeAttachAndMountControllerDisabled(t *testing.T) { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + + pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ + Volumes: []api.Volume{ + { + Name: "vol1", + VolumeSource: api.VolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + PDName: "fake-device", + }, + }, + }, + }, + }) + + stopCh := runVolumeManager(kubelet) + defer func() { + close(stopCh) + }() + + kubelet.podManager.SetPods([]*api.Pod{pod}) + err := kubelet.volumeManager.WaitForAttachAndMount(pod) + assert.NoError(t, err) + + podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( + volumehelper.GetUniquePodName(pod)) + + expectedPodVolumes := []string{"vol1"} + assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) + for _, name := range expectedPodVolumes { + assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod) + } + assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") + assert.NoError(t, volumetest.VerifyWaitForAttachCallCount( + 1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifyAttachCallCount( + 1 /* expectedAttachCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifyMountDeviceCallCount( + 1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifySetUpCallCount( + 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) +} + +func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { + testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + + pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ + Volumes: []api.Volume{ + { + Name: "vol1", + VolumeSource: api.VolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + PDName: "fake-device", + }, + }, + }, + }, + }) + + stopCh := runVolumeManager(kubelet) + defer func() { + close(stopCh) + }() + + // Add pod + kubelet.podManager.SetPods([]*api.Pod{pod}) + + // Verify volumes attached + err := kubelet.volumeManager.WaitForAttachAndMount(pod) + assert.NoError(t, err) + + podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( + volumehelper.GetUniquePodName(pod)) + + expectedPodVolumes := []string{"vol1"} + assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) + for _, name := range expectedPodVolumes { + assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod) + } + + assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") + assert.NoError(t, volumetest.VerifyWaitForAttachCallCount( + 1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifyAttachCallCount( + 1 /* expectedAttachCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifyMountDeviceCallCount( + 1 /* 
expectedMountDeviceCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifySetUpCallCount( + 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) + + // Remove pod + kubelet.podManager.SetPods([]*api.Pod{}) + + assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod)) + + // Verify volumes unmounted + podVolumes = kubelet.volumeManager.GetMountedVolumesForPod( + volumehelper.GetUniquePodName(pod)) + + assert.Len(t, podVolumes, 0, + "Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes) + + assert.NoError(t, volumetest.VerifyTearDownCallCount( + 1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)) + + // Verify volumes detached and no longer reported as in use + assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) + assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") + assert.NoError(t, volumetest.VerifyDetachCallCount( + 1 /* expectedDetachCallCount */, testKubelet.volumePlugin)) +} + +func TestVolumeAttachAndMountControllerEnabled(t *testing.T) { + testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */) + kubelet := testKubelet.kubelet + kubeClient := testKubelet.fakeKubeClient + kubeClient.AddReactor("get", "nodes", + func(action core.Action) (bool, runtime.Object, error) { + return true, &api.Node{ + ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, + Status: api.NodeStatus{ + VolumesAttached: []api.AttachedVolume{ + { + Name: "fake/vol1", + DevicePath: "fake/path", + }, + }}, + Spec: api.NodeSpec{ExternalID: testKubeletHostname}, + }, nil + }) + kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, fmt.Errorf("no reaction implemented for %s", action) + }) + + pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ + Volumes: []api.Volume{ + { + Name: "vol1", + VolumeSource: api.VolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + PDName: "fake-device", + }, + }, + }, + }, + }) + + stopCh := runVolumeManager(kubelet) + defer func() { + close(stopCh) + }() + + kubelet.podManager.SetPods([]*api.Pod{pod}) + + // Fake node status update + go simulateVolumeInUseUpdate( + api.UniqueVolumeName("fake/vol1"), + stopCh, + kubelet.volumeManager) + + assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod)) + + podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( + volumehelper.GetUniquePodName(pod)) + + expectedPodVolumes := []string{"vol1"} + assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) + for _, name := range expectedPodVolumes { + assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod) + } + assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") + assert.NoError(t, volumetest.VerifyWaitForAttachCallCount( + 1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifyMountDeviceCallCount( + 1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifySetUpCallCount( + 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) +} + +func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { + testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */) + kubelet 
:= testKubelet.kubelet + kubeClient := testKubelet.fakeKubeClient + kubeClient.AddReactor("get", "nodes", + func(action core.Action) (bool, runtime.Object, error) { + return true, &api.Node{ + ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, + Status: api.NodeStatus{ + VolumesAttached: []api.AttachedVolume{ + { + Name: "fake/vol1", + DevicePath: "fake/path", + }, + }}, + Spec: api.NodeSpec{ExternalID: testKubeletHostname}, + }, nil + }) + kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, fmt.Errorf("no reaction implemented for %s", action) + }) + + pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ + Volumes: []api.Volume{ + { + Name: "vol1", + VolumeSource: api.VolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + PDName: "fake-device", + }, + }, + }, + }, + }) + + stopCh := runVolumeManager(kubelet) + defer func() { + close(stopCh) + }() + + // Add pod + kubelet.podManager.SetPods([]*api.Pod{pod}) + + // Fake node status update + go simulateVolumeInUseUpdate( + api.UniqueVolumeName("fake/vol1"), + stopCh, + kubelet.volumeManager) + + // Verify volumes attached + assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod)) + + podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( + volumehelper.GetUniquePodName(pod)) + + expectedPodVolumes := []string{"vol1"} + assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) + for _, name := range expectedPodVolumes { + assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod) + } + + assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") + assert.NoError(t, volumetest.VerifyWaitForAttachCallCount( + 1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifyMountDeviceCallCount( + 1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)) + assert.NoError(t, volumetest.VerifySetUpCallCount( + 1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)) + + // Remove pod + kubelet.podManager.SetPods([]*api.Pod{}) + + assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod)) + + // Verify volumes unmounted + podVolumes = kubelet.volumeManager.GetMountedVolumesForPod( + volumehelper.GetUniquePodName(pod)) + + assert.Len(t, podVolumes, 0, + "Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes) + + assert.NoError(t, volumetest.VerifyTearDownCallCount( + 1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)) + + // Verify volumes detached and no longer reported as in use + assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) + assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") + assert.NoError(t, volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin)) +} + +type stubVolume struct { + path string + volume.MetricsNil +} + +func (f *stubVolume) GetPath() string { + return f.path +} + +func (f *stubVolume) GetAttributes() volume.Attributes { + return volume.Attributes{} +} + +func (f *stubVolume) SetUp(fsGroup *int64) error { + return nil +} + +func (f *stubVolume) SetUpAt(dir string, fsGroup *int64) error { + return nil +}
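Note on test helpers: the new tests in kubelet_volumes_test.go call several helpers that are defined elsewhere in the test package and are not part of this hunk (podWithUidNameNsSpec, runVolumeManager, simulateVolumeInUseUpdate, waitForVolumeUnmount, waitForVolumeDetach). For reference, below is a minimal sketch of the two that drive the volume manager, written from how the tests above invoke them; the actual definitions in kubelet_test.go may differ in signature and detail.

// Sketch only: shapes inferred from the call sites in the tests above,
// not copied from the tree. Assumes volumeManager.Run accepts the kubelet's
// sourcesReady plus a stop channel; treat both helpers as illustrative.
package kubelet

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
)

// runVolumeManager starts the kubelet's volume manager in the background and
// returns the stop channel that the tests close via defer.
func runVolumeManager(kubelet *Kubelet) chan struct{} {
	stopCh := make(chan struct{})
	go kubelet.volumeManager.Run(kubelet.sourcesReady, stopCh)
	return stopCh
}

// simulateVolumeInUseUpdate stands in for the node-status loop when the
// attach/detach controller is enabled: it periodically marks the volume as
// reported in use so that WaitForAttachAndMount can make progress.
func simulateVolumeInUseUpdate(volumeName api.UniqueVolumeName, stopCh <-chan struct{}, volumeManager kubeletvolume.VolumeManager) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			volumeManager.MarkVolumesAsReportedInUse([]api.UniqueVolumeName{volumeName})
		case <-stopCh:
			return
		}
	}
}

Two design points in the hunk itself are worth noting. The catch-all kubeClient.AddReactor("*", "*", ...) that returns an error ensures the controller-enabled tests fail loudly if the volume manager issues any API call other than the mocked "get nodes". The stubVolume type at the end appears to be a minimal no-op implementation of the volume.Volume interface used by other tests in this file; it reports a fixed path and performs no real setup.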