mirror of https://github.com/k3s-io/k3s
test/e2e_node
commit 29400ac195 (parent f3b5d514ab)
@@ -28,7 +28,7 @@ import (
  "k8s.io/client-go/pkg/api/errors"
  "k8s.io/client-go/pkg/api/unversioned"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/security/apparmor"
  "k8s.io/kubernetes/pkg/watch"
  "k8s.io/kubernetes/test/e2e/framework"

@@ -138,7 +138,7 @@ func loadTestProfiles() error {
  return nil
  }

- func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api.PodStatus {
+ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus {
  pod := createPodWithAppArmor(f, profile)
  if shouldRun {
  // The pod needs to start before it stops, so wait for the longer start timeout.

@@ -146,7 +146,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api
  f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
  } else {
  // Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
- w, err := f.PodClient().Watch(api.SingleObject(api.ObjectMeta{Name: pod.Name}))
+ w, err := f.PodClient().Watch(v1.SingleObject(v1.ObjectMeta{Name: pod.Name}))
  framework.ExpectNoError(err)
  _, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
  switch e.Type {

@@ -154,7 +154,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api
  return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, pod.Name)
  }
  switch t := e.Object.(type) {
- case *api.Pod:
+ case *v1.Pod:
  if t.Status.Reason == "AppArmor" {
  return true, nil
  }

@@ -168,29 +168,29 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api
  return p.Status
  }

- func createPodWithAppArmor(f *framework.Framework, profile string) *api.Pod {
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)),
  Annotations: map[string]string{
  apparmor.ContainerAnnotationKeyPrefix + "test": profile,
  },
  },
- Spec: api.PodSpec{
- Containers: []api.Container{{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{{
  Name: "test",
  Image: "gcr.io/google_containers/busybox:1.24",
  Command: []string{"touch", "foo"},
  }},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
  },
  }
  return f.PodClient().Create(pod)
  }

- func expectSoftRejection(status api.PodStatus) {
+ func expectSoftRejection(status v1.PodStatus) {
  args := []interface{}{"PodStatus: %+v", status}
- Expect(status.Phase).To(Equal(api.PodPending), args...)
+ Expect(status.Phase).To(Equal(v1.PodPending), args...)
  Expect(status.Reason).To(Equal("AppArmor"), args...)
  Expect(status.Message).To(ContainSubstring("AppArmor"), args...)
  Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(Equal("Blocked"), args...)

@@ -17,8 +17,8 @@ limitations under the License.
  package e2e_node

  import (
- "k8s.io/kubernetes/pkg/api"
  "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/kubelet/cm"
  "k8s.io/kubernetes/pkg/kubelet/qos"
  "k8s.io/kubernetes/pkg/util/uuid"

@@ -30,27 +30,27 @@ import (
  // getResourceList returns a ResourceList with the
  // specified cpu and memory resource values
- func getResourceList(cpu, memory string) api.ResourceList {
- res := api.ResourceList{}
+ func getResourceList(cpu, memory string) v1.ResourceList {
+ res := v1.ResourceList{}
  if cpu != "" {
- res[api.ResourceCPU] = resource.MustParse(cpu)
+ res[v1.ResourceCPU] = resource.MustParse(cpu)
  }
  if memory != "" {
- res[api.ResourceMemory] = resource.MustParse(memory)
+ res[v1.ResourceMemory] = resource.MustParse(memory)
  }
  return res
  }

  // getResourceRequirements returns a ResourceRequirements object
- func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements {
- res := api.ResourceRequirements{}
+ func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
+ res := v1.ResourceRequirements{}
  res.Requests = requests
  res.Limits = limits
  return res
  }

  // makePodToVerifyCgroups returns a pod that verifies the existence of the specified cgroups.
- func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
+ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *v1.Pod {
  // convert the names to their literal cgroupfs forms...
  cgroupFsNames := []string{}
  for _, cgroupName := range cgroupNames {

@@ -68,18 +68,18 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
  command += localCommand
  }

- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "pod" + string(uuid.NewUUID()),
  },
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/busybox:1.24",
  Name: "container" + string(uuid.NewUUID()),
  Command: []string{"sh", "-c", command},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
  {
  Name: "sysfscgroup",
  MountPath: "/tmp",

@@ -87,11 +87,11 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
  },
  },
  },
- Volumes: []api.Volume{
+ Volumes: []v1.Volume{
  {
  Name: "sysfscgroup",
- VolumeSource: api.VolumeSource{
- HostPath: &api.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
+ VolumeSource: v1.VolumeSource{
+ HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
  },
  },
  },

@@ -101,23 +101,23 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
  }

  // makePodToVerifyCgroupRemoved verfies the specified cgroup does not exist.
- func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *api.Pod {
+ func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *v1.Pod {
  cgroupFsName := string(cgroupName)
  if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
  cgroupFsName = cm.ConvertCgroupNameToSystemd(cm.CgroupName(cgroupName), true)
  }
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "pod" + string(uuid.NewUUID()),
  },
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyOnFailure,
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyOnFailure,
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/busybox:1.24",
  Name: "container" + string(uuid.NewUUID()),
  Command: []string{"sh", "-c", "for i in `seq 1 10`; do if [ ! -d /tmp/memory/" + cgroupFsName + " ] && [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 0; else sleep 10; fi; done; exit 1"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
  {
  Name: "sysfscgroup",
  MountPath: "/tmp",

@@ -125,11 +125,11 @@ func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *api.Pod {
  },
  },
  },
- Volumes: []api.Volume{
+ Volumes: []v1.Volume{
  {
  Name: "sysfscgroup",
- VolumeSource: api.VolumeSource{
- HostPath: &api.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
+ VolumeSource: v1.VolumeSource{
+ HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
  },
  },
  },

@@ -162,17 +162,17 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
  return
  }
  var (
- guaranteedPod *api.Pod
+ guaranteedPod *v1.Pod
  podUID string
  )
  By("Creating a Guaranteed pod in Namespace", func() {
- guaranteedPod = f.PodClient().Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ guaranteedPod = f.PodClient().Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "pod" + string(uuid.NewUUID()),
  Namespace: f.Namespace.Name,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Image: framework.GetPauseImageName(f.ClientSet),
  Name: "container" + string(uuid.NewUUID()),

@@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
  })
  By("Checking if the pod cgroup was deleted", func() {
  gp := int64(1)
- Expect(f.PodClient().Delete(guaranteedPod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+ Expect(f.PodClient().Delete(guaranteedPod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
  pod := makePodToVerifyCgroupRemoved(cm.CgroupName("pod" + podUID))
  f.PodClient().Create(pod)
  err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)

@@ -207,16 +207,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
  }
  var (
  podUID string
- bestEffortPod *api.Pod
+ bestEffortPod *v1.Pod
  )
  By("Creating a BestEffort pod in Namespace", func() {
- bestEffortPod = f.PodClient().Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ bestEffortPod = f.PodClient().Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "pod" + string(uuid.NewUUID()),
  Namespace: f.Namespace.Name,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Image: framework.GetPauseImageName(f.ClientSet),
  Name: "container" + string(uuid.NewUUID()),

@@ -236,7 +236,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
  })
  By("Checking if the pod cgroup was deleted", func() {
  gp := int64(1)
- Expect(f.PodClient().Delete(bestEffortPod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+ Expect(f.PodClient().Delete(bestEffortPod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
  pod := makePodToVerifyCgroupRemoved(cm.CgroupName("BestEffort/pod" + podUID))
  f.PodClient().Create(pod)
  err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)

@@ -251,16 +251,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
  }
  var (
  podUID string
- burstablePod *api.Pod
+ burstablePod *v1.Pod
  )
  By("Creating a Burstable pod in Namespace", func() {
- burstablePod = f.PodClient().Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ burstablePod = f.PodClient().Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "pod" + string(uuid.NewUUID()),
  Namespace: f.Namespace.Name,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Image: framework.GetPauseImageName(f.ClientSet),
  Name: "container" + string(uuid.NewUUID()),

@@ -280,7 +280,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
  })
  By("Checking if the pod cgroup was deleted", func() {
  gp := int64(1)
- Expect(f.PodClient().Delete(burstablePod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+ Expect(f.PodClient().Delete(burstablePod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
  pod := makePodToVerifyCgroupRemoved(cm.CgroupName("Burstable/pod" + podUID))
  f.PodClient().Create(pod)
  err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)

@@ -19,8 +19,8 @@ package e2e_node
  import (
  "fmt"

- "k8s.io/kubernetes/pkg/api"
  "k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/util/uuid"
  "k8s.io/kubernetes/test/e2e/framework"
  )

@@ -28,29 +28,29 @@ import (
  // One pod one container
  // TODO: This should be migrated to the e2e framework.
  type ConformanceContainer struct {
- Container api.Container
- RestartPolicy api.RestartPolicy
- Volumes []api.Volume
+ Container v1.Container
+ RestartPolicy v1.RestartPolicy
+ Volumes []v1.Volume
  ImagePullSecrets []string

  PodClient *framework.PodClient
  podName string
- PodSecurityContext *api.PodSecurityContext
+ PodSecurityContext *v1.PodSecurityContext
  }

  func (cc *ConformanceContainer) Create() {
  cc.podName = cc.Container.Name + string(uuid.NewUUID())
- imagePullSecrets := []api.LocalObjectReference{}
+ imagePullSecrets := []v1.LocalObjectReference{}
  for _, s := range cc.ImagePullSecrets {
- imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: s})
+ imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: s})
  }
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: cc.podName,
  },
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
  RestartPolicy: cc.RestartPolicy,
- Containers: []api.Container{
+ Containers: []v1.Container{
  cc.Container,
  },
  SecurityContext: cc.PodSecurityContext,

@@ -62,7 +62,7 @@ func (cc *ConformanceContainer) Create() {
  }

  func (cc *ConformanceContainer) Delete() error {
- return cc.PodClient.Delete(cc.podName, api.NewDeleteOptions(0))
+ return cc.PodClient.Delete(cc.podName, v1.NewDeleteOptions(0))
  }

  func (cc *ConformanceContainer) IsReady() (bool, error) {

@@ -70,25 +70,25 @@ func (cc *ConformanceContainer) IsReady() (bool, error) {
  if err != nil {
  return false, err
  }
- return api.IsPodReady(pod), nil
+ return v1.IsPodReady(pod), nil
  }

- func (cc *ConformanceContainer) GetPhase() (api.PodPhase, error) {
+ func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
  pod, err := cc.PodClient.Get(cc.podName)
  if err != nil {
- return api.PodUnknown, err
+ return v1.PodUnknown, err
  }
  return pod.Status.Phase, nil
  }

- func (cc *ConformanceContainer) GetStatus() (api.ContainerStatus, error) {
+ func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
  pod, err := cc.PodClient.Get(cc.podName)
  if err != nil {
- return api.ContainerStatus{}, err
+ return v1.ContainerStatus{}, err
  }
  statuses := pod.Status.ContainerStatuses
  if len(statuses) != 1 || statuses[0].Name != cc.Container.Name {
- return api.ContainerStatus{}, fmt.Errorf("unexpected container statuses %v", statuses)
+ return v1.ContainerStatus{}, fmt.Errorf("unexpected container statuses %v", statuses)
  }
  return statuses[0], nil
  }

@@ -113,7 +113,7 @@ const (
  ContainerStateUnknown ContainerState = "Unknown"
  )

- func GetContainerState(state api.ContainerState) ContainerState {
+ func GetContainerState(state v1.ContainerState) ContainerState {
  if state.Waiting != nil {
  return ContainerStateWaiting
  }

@@ -26,8 +26,8 @@ import (
  "strings"
  "time"

- "k8s.io/kubernetes/pkg/api"
  "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/util/uuid"
  "k8s.io/kubernetes/test/e2e/framework"

@@ -95,12 +95,12 @@ var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
  var err error
  podClient := f.PodClient()
  podName := "besteffort" + string(uuid.NewUUID())
- podClient.Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podClient.Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: podName,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/serve_hostname:v1.4",
  Name: podName,

@@ -139,17 +139,17 @@ var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
  It("guaranteed container's oom-score-adj should be -998", func() {
  podClient := f.PodClient()
  podName := "guaranteed" + string(uuid.NewUUID())
- podClient.Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podClient.Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: podName,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/nginx-slim:0.7",
  Name: podName,
- Resources: api.ResourceRequirements{
- Limits: api.ResourceList{
+ Resources: v1.ResourceRequirements{
+ Limits: v1.ResourceList{
  "cpu": resource.MustParse("100m"),
  "memory": resource.MustParse("50Mi"),
  },

@@ -180,17 +180,17 @@ var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
  It("burstable container's oom-score-adj should be between [2, 1000)", func() {
  podClient := f.PodClient()
  podName := "burstable" + string(uuid.NewUUID())
- podClient.Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podClient.Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: podName,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/test-webserver:e2e",
  Name: podName,
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
  "cpu": resource.MustParse("100m"),
  "memory": resource.MustParse("50Mi"),
  },

@@ -25,8 +25,8 @@ import (
  "sync"
  "time"

- "k8s.io/kubernetes/pkg/api"
  "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/client/cache"
  "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
  kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"

@@ -429,7 +429,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
  // createBatchPodWithRateControl creates a batch of pods concurrently, uses one goroutine for each creation.
  // between creations there is an interval for throughput control
- func createBatchPodWithRateControl(f *framework.Framework, pods []*api.Pod, interval time.Duration) map[string]unversioned.Time {
+ func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]unversioned.Time {
  createTimes := make(map[string]unversioned.Time)
  for _, pod := range pods {
  createTimes[pod.ObjectMeta.Name] = unversioned.Now()

@@ -479,12 +479,12 @@ func verifyPodStartupLatency(expect, actual framework.LatencyMetric) error {
  func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]unversioned.Time,
  podType string) *cache.Controller {
  ns := f.Namespace.Name
- checkPodRunning := func(p *api.Pod) {
+ checkPodRunning := func(p *v1.Pod) {
  mutex.Lock()
  defer mutex.Unlock()
  defer GinkgoRecover()

- if p.Status.Phase == api.PodRunning {
+ if p.Status.Phase == v1.PodRunning {
  if _, found := watchTimes[p.Name]; !found {
  watchTimes[p.Name] = unversioned.Now()
  }

@@ -493,26 +493,26 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
  _, controller := cache.NewInformer(
  &cache.ListWatch{
- ListFunc: func(options api.ListOptions) (runtime.Object, error) {
- options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType})
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
  obj, err := f.ClientSet.Core().Pods(ns).List(options)
  return runtime.Object(obj), err
  },
- WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
- options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType})
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
  return f.ClientSet.Core().Pods(ns).Watch(options)
  },
  },
- &api.Pod{},
+ &v1.Pod{},
  0,
  cache.ResourceEventHandlerFuncs{
  AddFunc: func(obj interface{}) {
- p, ok := obj.(*api.Pod)
+ p, ok := obj.(*v1.Pod)
  Expect(ok).To(Equal(true))
  go checkPodRunning(p)
  },
  UpdateFunc: func(oldObj, newObj interface{}) {
- p, ok := newObj.(*api.Pod)
+ p, ok := newObj.(*v1.Pod)
  Expect(ok).To(Equal(true))
  go checkPodRunning(p)
  },

@@ -522,7 +522,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
  }

  // createBatchPodSequential creats pods back-to-back in sequence.
- func createBatchPodSequential(f *framework.Framework, pods []*api.Pod) (time.Duration, []framework.PodLatencyData) {
+ func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod) (time.Duration, []framework.PodLatencyData) {
  batchStartTime := unversioned.Now()
  e2eLags := make([]framework.PodLatencyData, 0)
  for _, pod := range pods {

@@ -21,13 +21,13 @@ import (
  "strings"
  "time"

- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/util/uuid"
  "k8s.io/kubernetes/test/e2e/framework"

  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
  )

  const (

@@ -72,13 +72,13 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
  idlePodName = "idle" + string(uuid.NewUUID())
  verifyPodName = "verify" + string(uuid.NewUUID())
  createIdlePod(idlePodName, podClient)
- podClient.Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podClient.Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: busyPodName,
  },
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/busybox:1.24",
  Name: busyPodName,

@@ -96,9 +96,9 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
  if !isImageSupported() || !evictionOptionIsSet() { // Skip the after each
  return
  }
- podClient.DeleteSync(busyPodName, &api.DeleteOptions{}, podDisappearTimeout)
- podClient.DeleteSync(idlePodName, &api.DeleteOptions{}, podDisappearTimeout)
- podClient.DeleteSync(verifyPodName, &api.DeleteOptions{}, podDisappearTimeout)
+ podClient.DeleteSync(busyPodName, &v1.DeleteOptions{}, podDisappearTimeout)
+ podClient.DeleteSync(idlePodName, &v1.DeleteOptions{}, podDisappearTimeout)
+ podClient.DeleteSync(verifyPodName, &v1.DeleteOptions{}, podDisappearTimeout)

  // Wait for 2 container gc loop to ensure that the containers are deleted. The containers
  // created in this test consume a lot of disk, we don't want them to trigger disk eviction

@@ -140,7 +140,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
  return err
  }

- if podData.Status.Phase != api.PodRunning {
+ if podData.Status.Phase != v1.PodRunning {
  err = verifyPodEviction(podData)
  if err != nil {
  return err

@@ -174,7 +174,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
  if err != nil {
  return err
  }
- if podData.Status.Phase != api.PodRunning {
+ if podData.Status.Phase != v1.PodRunning {
  return fmt.Errorf("waiting for the new pod to be running")
  }

@@ -186,13 +186,13 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
  })

  func createIdlePod(podName string, podClient *framework.PodClient) {
- podClient.Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podClient.Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: podName,
  },
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Image: framework.GetPauseImageNameForHostArch(),
  Name: podName,

@@ -202,8 +202,8 @@ func createIdlePod(podName string, podClient *framework.PodClient) {
  })
  }

- func verifyPodEviction(podData *api.Pod) error {
- if podData.Status.Phase != api.PodFailed {
+ func verifyPodEviction(podData *v1.Pod) error {
+ if podData.Status.Phase != v1.PodFailed {
  return fmt.Errorf("expected phase to be failed. got %+v", podData.Status.Phase)
  }
  if podData.Status.Reason != "Evicted" {

@@ -215,8 +215,8 @@ func verifyPodEviction(podData *api.Pod) error {
  func nodeHasDiskPressure(cs clientset.Interface) bool {
  nodeList := framework.GetReadySchedulableNodesOrDie(cs)
  for _, condition := range nodeList.Items[0].Status.Conditions {
- if condition.Type == api.NodeDiskPressure {
- return condition.Status == api.ConditionTrue
+ if condition.Type == v1.NodeDiskPressure {
+ return condition.Status == v1.ConditionTrue
  }
  }
  return false

@@ -31,8 +31,8 @@ import (
  "testing"
  "time"

- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
  commontest "k8s.io/kubernetes/test/e2e/common"
  "k8s.io/kubernetes/test/e2e/framework"
  "k8s.io/kubernetes/test/e2e_node/services"

@@ -212,7 +212,7 @@ func waitForNodeReady() {
  if err != nil {
  return fmt.Errorf("failed to get node: %v", err)
  }
- if !api.IsNodeReady(node) {
+ if !v1.IsNodeReady(node) {
  return fmt.Errorf("node is not ready: %+v", node)
  }
  return nil

@@ -245,8 +245,8 @@ func updateTestContext() error {
  }

  // getNode gets node object from the apiserver.
- func getNode(c *clientset.Clientset) (*api.Node, error) {
- nodes, err := c.Nodes().List(api.ListOptions{})
+ func getNode(c *clientset.Clientset) (*v1.Node, error) {
+ nodes, err := c.Nodes().List(v1.ListOptions{})
  Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.")
  if nodes == nil {
  return nil, fmt.Errorf("the node list is nil.")

@@ -21,7 +21,7 @@ import (
  "strings"
  "time"

- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
  docker "k8s.io/kubernetes/pkg/kubelet/dockertools"
  "k8s.io/kubernetes/test/e2e/framework"

@@ -230,7 +230,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
  AfterEach(func() {
  for _, pod := range test.testPods {
  By(fmt.Sprintf("Deleting Pod %v", pod.podName))
- f.PodClient().DeleteSync(pod.podName, &api.DeleteOptions{}, defaultRuntimeRequestTimeoutDuration)
+ f.PodClient().DeleteSync(pod.podName, &v1.DeleteOptions{}, defaultRuntimeRequestTimeoutDuration)
  }

  By("Making sure all containers get cleaned up")

@@ -279,12 +279,12 @@ func dockerContainerGCTest(f *framework.Framework, test testRun) {
  containerGCTest(f, test)
  }

- func getPods(specs []*testPodSpec) (pods []*api.Pod) {
+ func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
  for _, spec := range specs {
  By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount))
- containers := []api.Container{}
+ containers := []v1.Container{}
  for i := 0; i < spec.numContainers; i++ {
- containers = append(containers, api.Container{
+ containers = append(containers, v1.Container{
  Image: "gcr.io/google_containers/busybox:1.24",
  Name: spec.getContainerName(i),
  Command: []string{

@@ -299,18 +299,18 @@ func getPods(specs []*testPodSpec) (pods []*api.Pod) {
  while true; do sleep 1; done
  `, i, spec.restartCount+1),
  },
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
  {MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
  },
  })
  }
- pods = append(pods, &api.Pod{
- ObjectMeta: api.ObjectMeta{Name: spec.podName},
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyAlways,
+ pods = append(pods, &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{Name: spec.podName},
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyAlways,
  Containers: containers,
- Volumes: []api.Volume{
- {Name: "test-empty-dir", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
+ Volumes: []v1.Volume{
+ {Name: "test-empty-dir", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
  },
  },
  })

@@ -17,7 +17,7 @@ limitations under the License.
  package e2e_node

  import (
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/kubelet/dockertools"
  "k8s.io/kubernetes/test/e2e/framework"

@@ -33,17 +33,17 @@ var _ = framework.KubeDescribe("ImageID", func() {
  f := framework.NewDefaultFramework("image-id-test")

  It("should be set to the manifest digest (from RepoDigests) when available", func() {
- podDesc := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podDesc := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "pod-with-repodigest",
  },
- Spec: api.PodSpec{
- Containers: []api.Container{{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{{
  Name: "test",
  Image: busyBoxImage,
  Command: []string{"sh"},
  }},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
  },
  }

@@ -21,8 +21,8 @@ import (
  "fmt"
  "time"

- "k8s.io/kubernetes/pkg/api"
  apiUnversioned "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/util/uuid"
  "k8s.io/kubernetes/test/e2e/framework"

@@ -39,14 +39,14 @@ var _ = framework.KubeDescribe("Kubelet", func() {
  Context("when scheduling a busybox command in a pod", func() {
  podName := "busybox-scheduling-" + string(uuid.NewUUID())
  It("it should print the output to logs [Conformance]", func() {
- podClient.CreateSync(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podClient.CreateSync(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: podName,
  },
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
  // Don't restart the Pod since it is expected to exit
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/busybox:1.24",
  Name: podName,

@@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
  })
  Eventually(func() string {
  sinceTime := apiUnversioned.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
- rc, err := podClient.GetLogs(podName, &api.PodLogOptions{SinceTime: &sinceTime}).Stream()
+ rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream()
  if err != nil {
  return ""
  }

@@ -73,14 +73,14 @@ var _ = framework.KubeDescribe("Kubelet", func() {
  BeforeEach(func() {
  podName = "bin-false" + string(uuid.NewUUID())
- podClient.Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podClient.Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: podName,
  },
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
  // Don't restart the Pod since it is expected to exit
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/busybox:1.24",
  Name: podName,

@@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
  })

  It("should be possible to delete", func() {
- err := podClient.Delete(podName, &api.DeleteOptions{})
+ err := podClient.Delete(podName, &v1.DeleteOptions{})
  Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
  })
  })

@@ -120,19 +120,19 @@ var _ = framework.KubeDescribe("Kubelet", func() {
  podName := "busybox-readonly-fs" + string(uuid.NewUUID())
  It("it should not write to root filesystem [Conformance]", func() {
  isReadOnly := true
- podClient.CreateSync(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podClient.CreateSync(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: podName,
  },
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
  // Don't restart the Pod since it is expected to exit
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/busybox:1.24",
  Name: podName,
  Command: []string{"sh", "-c", "echo test > /file; sleep 240"},
- SecurityContext: &api.SecurityContext{
+ SecurityContext: &v1.SecurityContext{
  ReadOnlyRootFilesystem: &isReadOnly,
  },
  },

@@ -140,7 +140,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
  },
  })
  Eventually(func() string {
- rc, err := podClient.GetLogs(podName, &api.PodLogOptions{}).Stream()
+ rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
  if err != nil {
  return ""
  }

@@ -20,7 +20,7 @@ import (
  "fmt"
  "time"

- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/util/intstr"
  "k8s.io/kubernetes/pkg/util/uuid"
  "k8s.io/kubernetes/test/e2e/framework"

@@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  Context("when it is exec hook", func() {
  var file string
- testPodWithExecHook := func(podWithHook *api.Pod) {
+ testPodWithExecHook := func(podWithHook *v1.Pod) {
  podCheckHook := getExecHookTestPod("pod-check-hook",
  // Wait until the file is created.
  []string{"sh", "-c", fmt.Sprintf("while [ ! -e %s ]; do sleep 1; done", file)},

@@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  podClient.WaitForSuccess(podCheckHook.Name, postStartWaitTimeout)
  }
  By("delete the pod with lifecycle hook")
- podClient.DeleteSync(podWithHook.Name, api.NewDeleteOptions(15), podWaitTimeout)
+ podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
  if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
  By("create the hook check pod")
  podClient.Create(podCheckHook)

@@ -84,9 +84,9 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  // Block forever
  []string{"tail", "-f", "/dev/null"},
  )
- podWithHook.Spec.Containers[0].Lifecycle = &api.Lifecycle{
- PostStart: &api.Handler{
- Exec: &api.ExecAction{Command: []string{"touch", file}},
+ podWithHook.Spec.Containers[0].Lifecycle = &v1.Lifecycle{
+ PostStart: &v1.Handler{
+ Exec: &v1.ExecAction{Command: []string{"touch", file}},
  },
  }
  testPodWithExecHook(podWithHook)

@@ -97,9 +97,9 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  // Block forever
  []string{"tail", "-f", "/dev/null"},
  )
- podWithHook.Spec.Containers[0].Lifecycle = &api.Lifecycle{
- PreStop: &api.Handler{
- Exec: &api.ExecAction{Command: []string{"touch", file}},
+ podWithHook.Spec.Containers[0].Lifecycle = &v1.Lifecycle{
+ PreStop: &v1.Handler{
+ Exec: &v1.ExecAction{Command: []string{"touch", file}},
  },
  }
  testPodWithExecHook(podWithHook)

@@ -108,19 +108,19 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  Context("when it is http hook", func() {
  var targetIP string
- podHandleHookRequest := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podHandleHookRequest := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "pod-handle-http-request",
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Name: "pod-handle-http-request",
  Image: "gcr.io/google_containers/netexec:1.7",
- Ports: []api.ContainerPort{
+ Ports: []v1.ContainerPort{
  {
  ContainerPort: 8080,
- Protocol: api.ProtocolTCP,
+ Protocol: v1.ProtocolTCP,
  },
  },
  },

@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  newPod := podClient.CreateSync(podHandleHookRequest)
  targetIP = newPod.Status.PodIP
  })
- testPodWithHttpHook := func(podWithHook *api.Pod) {
+ testPodWithHttpHook := func(podWithHook *v1.Pod) {
  By("create the pod with lifecycle hook")
  podClient.CreateSync(podWithHook)
  if podWithHook.Spec.Containers[0].Lifecycle.PostStart != nil {

@@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  }, postStartWaitTimeout, podCheckInterval).Should(BeNil())
  }
  By("delete the pod with lifecycle hook")
- podClient.DeleteSync(podWithHook.Name, api.NewDeleteOptions(15), podWaitTimeout)
+ podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
  if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
  By("check prestop hook")
  Eventually(func() error {

@@ -153,18 +153,18 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  }
  }
  It("should execute poststart http hook properly [Conformance]", func() {
- podWithHook := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podWithHook := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "pod-with-poststart-http-hook",
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Name: "pod-with-poststart-http-hook",
  Image: framework.GetPauseImageNameForHostArch(),
- Lifecycle: &api.Lifecycle{
- PostStart: &api.Handler{
- HTTPGet: &api.HTTPGetAction{
+ Lifecycle: &v1.Lifecycle{
+ PostStart: &v1.Handler{
+ HTTPGet: &v1.HTTPGetAction{
  Path: "/echo?msg=poststart",
  Host: targetIP,
  Port: intstr.FromInt(8080),

@@ -178,18 +178,18 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  testPodWithHttpHook(podWithHook)
  })
  It("should execute prestop http hook properly [Conformance]", func() {
- podWithHook := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podWithHook := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "pod-with-prestop-http-hook",
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Name: "pod-with-prestop-http-hook",
  Image: framework.GetPauseImageNameForHostArch(),
- Lifecycle: &api.Lifecycle{
- PreStop: &api.Handler{
- HTTPGet: &api.HTTPGetAction{
+ Lifecycle: &v1.Lifecycle{
+ PreStop: &v1.Handler{
+ HTTPGet: &v1.HTTPGetAction{
  Path: "/echo?msg=prestop",
  Host: targetIP,
  Port: intstr.FromInt(8080),

@@ -206,17 +206,17 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
  })
  })

- func getExecHookTestPod(name string, cmd []string) *api.Pod {
- return &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ func getExecHookTestPod(name string, cmd []string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: name,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Name: name,
  Image: "gcr.io/google_containers/busybox:1.24",
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
  {
  Name: "tmpfs",
  MountPath: "/tmp",

@@ -225,11 +225,11 @@ func getExecHookTestPod(name string, cmd []string) *api.Pod {
  Command: cmd,
  },
  },
- RestartPolicy: api.RestartPolicyNever,
- Volumes: []api.Volume{
+ RestartPolicy: v1.RestartPolicyNever,
+ Volumes: []v1.Volume{
  {
  Name: "tmpfs",
- VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp"}},
+ VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/tmp"}},
  },
  },
  },

@@ -17,7 +17,7 @@ limitations under the License.
  package e2e_node

  import (
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/kubelet"
  kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
  "k8s.io/kubernetes/pkg/util/uuid"

@@ -44,14 +44,14 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
  checkPodName := "checker" + string(uuid.NewUUID())
  checkContName := "checker-c-" + string(uuid.NewUUID())

- logPod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ logPod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: logPodName,
  },
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
  // this pod is expected to exit successfully
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/busybox:1.24",
  Name: logContName,

@@ -72,21 +72,21 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
  expectedlogFile := logDir + "/" + logPodName + "_" + ns + "_" + logContName + "-" + logConID.ID + ".log"

- checkPod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ checkPod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: checkPodName,
  },
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
  // this pod is expected to exit successfully
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Image: "gcr.io/google_containers/busybox:1.24",
  Name: checkContName,
  // If we find expected log file and contains right content, exit 0
  // else, keep checking until test timeout
  Command: []string{"sh", "-c", "while true; do if [ -e " + expectedlogFile + " ] && grep -q " + logString + " " + expectedlogFile + "; then exit 0; fi; sleep 1; done"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
  {
  Name: logDirVolumeName,
  // mount ContainerLogsDir to the same path in container

@@ -96,11 +96,11 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
  },
  },
  },
- Volumes: []api.Volume{
+ Volumes: []v1.Volume{
  {
  Name: logDirVolumeName,
- VolumeSource: api.VolumeSource{
- HostPath: &api.HostPathVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ HostPath: &v1.HostPathVolumeSource{
  Path: expectedlogFile,
  },
  },

@@ -22,8 +22,8 @@ import (
  "time"

  "github.com/golang/glog"
- "k8s.io/kubernetes/pkg/api"
  "k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/test/e2e/framework"

  . "github.com/onsi/ginkgo"

@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
  // Wait for the memory pressure condition to disappear from the node status before continuing.
  By("waiting for the memory pressure condition on the node to disappear before ending the test.")
  Eventually(func() error {
- nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{})
+ nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
  if err != nil {
  return fmt.Errorf("tried to get node list but got error: %v", err)
  }

@@ -59,8 +59,8 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
  return fmt.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
  }
  node := nodeList.Items[0]
- _, pressure := api.GetNodeCondition(&node.Status, api.NodeMemoryPressure)
- if pressure != nil && pressure.Status == api.ConditionTrue {
+ _, pressure := v1.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
+ if pressure != nil && pressure.Status == v1.ConditionTrue {
  return fmt.Errorf("node is still reporting memory pressure condition: %s", pressure)
  }
  return nil

@@ -104,13 +104,13 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
  // Finally, try starting a new pod and wait for it to be scheduled and running.
  // This is the final check to try to prevent interference with subsequent tests.
  podName := "admit-best-effort-pod"
- f.PodClient().CreateSync(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ f.PodClient().CreateSync(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: podName,
  },
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Image: framework.GetPauseImageNameForHostArch(),
  Name: podName,

@@ -124,25 +124,25 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
  By("creating a guaranteed pod, a burstable pod, and a besteffort pod.")

  // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
- guaranteed := createMemhogPod(f, "guaranteed-", "guaranteed", api.ResourceRequirements{
- Requests: api.ResourceList{
+ guaranteed := createMemhogPod(f, "guaranteed-", "guaranteed", v1.ResourceRequirements{
+ Requests: v1.ResourceList{
  "cpu": resource.MustParse("100m"),
  "memory": resource.MustParse("100Mi"),
  },
- Limits: api.ResourceList{
+ Limits: v1.ResourceList{
  "cpu": resource.MustParse("100m"),
  "memory": resource.MustParse("100Mi"),
  }})

  // A pod is burstable if limits and requests do not match across all containers.
- burstable := createMemhogPod(f, "burstable-", "burstable", api.ResourceRequirements{
- Requests: api.ResourceList{
+ burstable := createMemhogPod(f, "burstable-", "burstable", v1.ResourceRequirements{
+ Requests: v1.ResourceList{
  "cpu": resource.MustParse("100m"),
  "memory": resource.MustParse("100Mi"),
  }})

  // A pod is besteffort if none of its containers have specified any requests or limits.
- besteffort := createMemhogPod(f, "besteffort-", "besteffort", api.ResourceRequirements{})
+ besteffort := createMemhogPod(f, "besteffort-", "besteffort", v1.ResourceRequirements{})

  // We poll until timeout or all pods are killed.
  // Inside the func, we check that all pods are in a valid phase with

@@ -174,7 +174,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
  // see the eviction manager reporting a pressure condition for a while without the besteffort failing,
  // and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we
  // will have more reason to believe the phase is out of date.
- nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{})
+ nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
  if err != nil {
  glog.Errorf("tried to get node list but got error: %v", err)
  }

@@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
  glog.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
  }
  node := nodeList.Items[0]
- _, pressure := api.GetNodeCondition(&node.Status, api.NodeMemoryPressure)
+ _, pressure := v1.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
  glog.Infof("node pressure condition: %s", pressure)

  // NOTE/TODO(mtaufen): Also log (at least temporarily) the actual memory consumption on the node.

@@ -198,15 +198,15 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
  }

- if bestPh == api.PodRunning {
- Expect(burstPh).NotTo(Equal(api.PodFailed), "burstable pod failed before best effort pod")
- Expect(gteedPh).NotTo(Equal(api.PodFailed), "guaranteed pod failed before best effort pod")
- } else if burstPh == api.PodRunning {
- Expect(gteedPh).NotTo(Equal(api.PodFailed), "guaranteed pod failed before burstable pod")
+ if bestPh == v1.PodRunning {
+ Expect(burstPh).NotTo(Equal(v1.PodFailed), "burstable pod failed before best effort pod")
+ Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before best effort pod")
+ } else if burstPh == v1.PodRunning {
+ Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before burstable pod")
  }

  // When both besteffort and burstable have been evicted, the test has completed.
- if bestPh == api.PodFailed && burstPh == api.PodFailed {
+ if bestPh == v1.PodFailed && burstPh == v1.PodFailed {
  return nil
  }
  return fmt.Errorf("besteffort and burstable have not yet both been evicted.")

@@ -219,12 +219,12 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
  })

- func createMemhogPod(f *framework.Framework, genName string, ctnName string, res api.ResourceRequirements) *api.Pod {
- env := []api.EnvVar{
+ func createMemhogPod(f *framework.Framework, genName string, ctnName string, res v1.ResourceRequirements) *v1.Pod {
+ env := []v1.EnvVar{
  {
  Name: "MEMORY_LIMIT",
- ValueFrom: &api.EnvVarSource{
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
  Resource: "limits.memory",
  },
  },

@@ -243,13 +243,13 @@ func createMemhogPod(f *framework.Framework, genName string, ctnName string, res
  memLimit = "$(MEMORY_LIMIT)"
  }

- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  GenerateName: genName,
  },
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyNever,
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyNever,
+ Containers: []v1.Container{
  {
  Name: ctnName,
  Image: "gcr.io/google-containers/stress:v1",

@@ -23,9 +23,9 @@ import (
 	"path/filepath"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/api/v1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -47,7 +47,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 
 			By("create the static pod")
 			err := createStaticPod(manifestPath, staticPodName, ns,
-				"gcr.io/google_containers/nginx-slim:0.7", api.RestartPolicyAlways)
+				"gcr.io/google_containers/nginx-slim:0.7", v1.RestartPolicyAlways)
 			Expect(err).ShouldNot(HaveOccurred())
 
 			By("wait for the mirror pod to be running")
@@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 
 			By("update the static pod container image")
 			image := framework.GetPauseImageNameForHostArch()
-			err = createStaticPod(manifestPath, staticPodName, ns, image, api.RestartPolicyAlways)
+			err = createStaticPod(manifestPath, staticPodName, ns, image, v1.RestartPolicyAlways)
 			Expect(err).ShouldNot(HaveOccurred())
 
 			By("wait for the mirror pod to be updated")
@@ -84,7 +84,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 			uid := pod.UID
 
 			By("delete the mirror pod with grace period 30s")
-			err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(30))
+			err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, v1.NewDeleteOptions(30))
 			Expect(err).ShouldNot(HaveOccurred())
 
 			By("wait for the mirror pod to be recreated")
@@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 			uid := pod.UID
 
 			By("delete the mirror pod with grace period 0s")
-			err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(0))
+			err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, v1.NewDeleteOptions(0))
 			Expect(err).ShouldNot(HaveOccurred())
 
 			By("wait for the mirror pod to be recreated")
@@ -124,7 +124,7 @@ func staticPodPath(dir, name, namespace string) string {
 	return filepath.Join(dir, namespace+"-"+name+".yaml")
 }
 
-func createStaticPod(dir, name, namespace, image string, restart api.RestartPolicy) error {
+func createStaticPod(dir, name, namespace, image string, restart v1.RestartPolicy) error {
 	template := `
 apiVersion: v1
 kind: Pod
@@ -168,7 +168,7 @@ func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error
 	if err != nil {
 		return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
 	}
-	if pod.Status.Phase != api.PodRunning {
+	if pod.Status.Phase != v1.PodRunning {
 		return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
 	}
 	return nil
@@ -182,7 +182,7 @@ func checkMirrorPodRecreatedAndRunnig(cl clientset.Interface, name, namespace st
 	if pod.UID == oUID {
 		return fmt.Errorf("expected the uid of mirror pod %q to be changed, got %q", name, pod.UID)
 	}
-	if pod.Status.Phase != api.PodRunning {
+	if pod.Status.Phase != v1.PodRunning {
 		return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
 	}
 	return nil

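Aside (not part of the diff): every hunk in this commit follows the same mechanical pattern, so a condensed sketch may help when reading the remaining files. It assumes only the 1.5-era packages already shown above; the helper names are made up for illustration.

package e2e_node

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// newExamplePod shows that the object literals keep their shape; only the
// package qualifier changes from api.* to v1.* (hypothetical helper).
func newExamplePod(name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{
				{Name: "c", Image: "gcr.io/google_containers/busybox:1.24"},
			},
		},
	}
}

// createExamplePod shows the versioned clientset replacing internalclientset;
// cs would typically come from clientset.NewForConfig (hypothetical helper).
func createExamplePod(cs clientset.Interface, ns string, pod *v1.Pod) (*v1.Pod, error) {
	return cs.Core().Pods(ns).Create(pod)
}
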
@@ -34,7 +34,7 @@ import (
 	cadvisorclient "github.com/google/cadvisor/client/v2"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/procfs"
@@ -292,30 +292,29 @@ func formatCPUSummary(summary framework.ContainersCPUSummary) string {
 }
 
 // createCadvisorPod creates a standalone cadvisor pod for fine-grain resource monitoring.
-func getCadvisorPod() *api.Pod {
-	return &api.Pod{
-		ObjectMeta: api.ObjectMeta{
+func getCadvisorPod() *v1.Pod {
+	return &v1.Pod{
+		ObjectMeta: v1.ObjectMeta{
 			Name: cadvisorPodName,
 		},
-		Spec: api.PodSpec{
+		Spec: v1.PodSpec{
 			// It uses a host port for the tests to collect data.
 			// Currently we can not use port mapping in test-e2e-node.
-			SecurityContext: &api.PodSecurityContext{
-				HostNetwork: true,
-			},
-			Containers: []api.Container{
+			HostNetwork: true,
+			SecurityContext: &v1.PodSecurityContext{},
+			Containers: []v1.Container{
 				{
 					Image: cadvisorImageName,
 					Name: cadvisorPodName,
-					Ports: []api.ContainerPort{
+					Ports: []v1.ContainerPort{
 						{
 							Name: "http",
 							HostPort: cadvisorPort,
 							ContainerPort: cadvisorPort,
-							Protocol: api.ProtocolTCP,
+							Protocol: v1.ProtocolTCP,
 						},
 					},
-					VolumeMounts: []api.VolumeMount{
+					VolumeMounts: []v1.VolumeMount{
 						{
 							Name: "sys",
 							ReadOnly: true,
@@ -344,22 +343,22 @@ func getCadvisorPod() *api.Pod {
 					},
 				},
 			},
-			Volumes: []api.Volume{
+			Volumes: []v1.Volume{
 				{
 					Name: "rootfs",
-					VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/"}},
+					VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/"}},
 				},
 				{
 					Name: "var-run",
-					VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/var/run"}},
+					VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/var/run"}},
 				},
 				{
 					Name: "sys",
-					VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/sys"}},
+					VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/sys"}},
 				},
 				{
 					Name: "docker",
-					VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/var/lib/docker"}},
+					VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/var/lib/docker"}},
 				},
 			},
 		},
@@ -367,14 +366,14 @@ func getCadvisorPod() *api.Pod {
 }
 
 // deletePodsSync deletes a list of pods and block until pods disappear.
-func deletePodsSync(f *framework.Framework, pods []*api.Pod) {
+func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 	var wg sync.WaitGroup
 	for _, pod := range pods {
 		wg.Add(1)
-		go func(pod *api.Pod) {
+		go func(pod *v1.Pod) {
 			defer wg.Done()
 
-			err := f.PodClient().Delete(pod.ObjectMeta.Name, api.NewDeleteOptions(30))
+			err := f.PodClient().Delete(pod.ObjectMeta.Name, v1.NewDeleteOptions(30))
 			Expect(err).NotTo(HaveOccurred())
 
 			Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
@@ -386,8 +385,8 @@ func deletePodsSync(f *framework.Framework, pods []*api.Pod) {
 }
 
 // newTestPods creates a list of pods (specification) for test.
-func newTestPods(numPods int, imageName, podType string) []*api.Pod {
-	var pods []*api.Pod
+func newTestPods(numPods int, imageName, podType string) []*v1.Pod {
+	var pods []*v1.Pod
 	for i := 0; i < numPods; i++ {
 		podName := "test-" + string(uuid.NewUUID())
 		labels := map[string]string{
@@ -395,14 +394,14 @@ func newTestPods(numPods int, imageName, podType string) []*api.Pod {
 			"name": podName,
 		}
 		pods = append(pods,
-			&api.Pod{
-				ObjectMeta: api.ObjectMeta{
+			&v1.Pod{
+				ObjectMeta: v1.ObjectMeta{
 					Name: podName,
 					Labels: labels,
 				},
-				Spec: api.PodSpec{
+				Spec: v1.PodSpec{
 					// Restart policy is always (default).
-					Containers: []api.Container{
+					Containers: []v1.Container{
 						{
 							Image: imageName,
 							Name: podName,

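Aside (not part of the diff): the cadvisor pod hunks above contain the one non-mechanical change in this commit — in the versioned API, HostNetwork sits on the PodSpec itself rather than on the PodSecurityContext. A minimal sketch, with an illustrative helper name and an assumed mount path:

package e2e_node

import "k8s.io/kubernetes/pkg/api/v1"

// newHostNetworkPod mirrors getCadvisorPod above: HostNetwork is set directly
// on the v1.PodSpec and the (now empty) PodSecurityContext stays behind.
func newHostNetworkPod(name, image string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			HostNetwork:     true,                     // moved out of PodSecurityContext in v1
			SecurityContext: &v1.PodSecurityContext{}, // no HostNetwork field here anymore
			Containers: []v1.Container{
				{
					Name:  name,
					Image: image,
					VolumeMounts: []v1.VolumeMount{
						{Name: "sys", ReadOnly: true, MountPath: "/sys"}, // mount path assumed
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name:         "sys",
					VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/sys"}},
				},
			},
		},
	}
}
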
@@ -23,7 +23,7 @@ import (
 	"strings"
 	"time"
 
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 	"k8s.io/kubernetes/test/e2e/framework"
 

@@ -19,27 +19,29 @@ limitations under the License.
 package e2e_node
 
 import (
-	"k8s.io/kubernetes/test/e2e/framework"
 	"time"
 
+	"k8s.io/kubernetes/test/e2e/framework"
+
 	"fmt"
-	. "github.com/onsi/ginkgo"
-	"k8s.io/kubernetes/pkg/api"
-	testutils "k8s.io/kubernetes/test/utils"
 	"os/exec"
+
+	. "github.com/onsi/ginkgo"
+	"k8s.io/kubernetes/pkg/api/v1"
+	testutils "k8s.io/kubernetes/test/utils"
 )
 
 // waitForPods waits for timeout duration, for pod_count.
 // If the timeout is hit, it returns the list of currently running pods.
-func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (runningPods []*api.Pod) {
+func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (runningPods []*v1.Pod) {
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
-		podList, err := f.PodClient().List(api.ListOptions{})
+		podList, err := f.PodClient().List(v1.ListOptions{})
 		if err != nil {
 			framework.Logf("Failed to list pods on node: %v", err)
 			continue
 		}
 
-		runningPods = []*api.Pod{}
+		runningPods = []*v1.Pod{}
 		for _, pod := range podList.Items {
 			if r, err := testutils.PodRunningReady(&pod); err != nil || !r {
 				continue

@@ -21,7 +21,7 @@ import (
 	"path"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -44,34 +44,34 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
 		It("it should run with the expected status [Conformance]", func() {
 			restartCountVolumeName := "restart-count"
 			restartCountVolumePath := "/restart-count"
-			testContainer := api.Container{
+			testContainer := v1.Container{
 				Image: "gcr.io/google_containers/busybox:1.24",
-				VolumeMounts: []api.VolumeMount{
+				VolumeMounts: []v1.VolumeMount{
 					{
 						MountPath: restartCountVolumePath,
 						Name: restartCountVolumeName,
 					},
 				},
 			}
-			testVolumes := []api.Volume{
+			testVolumes := []v1.Volume{
 				{
 					Name: restartCountVolumeName,
-					VolumeSource: api.VolumeSource{
-						EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory},
+					VolumeSource: v1.VolumeSource{
+						EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
 					},
 				},
 			}
 			testCases := []struct {
 				Name string
-				RestartPolicy api.RestartPolicy
-				Phase api.PodPhase
+				RestartPolicy v1.RestartPolicy
+				Phase v1.PodPhase
 				State ContainerState
 				RestartCount int32
 				Ready bool
 			}{
-				{"terminate-cmd-rpa", api.RestartPolicyAlways, api.PodRunning, ContainerStateRunning, 2, true},
-				{"terminate-cmd-rpof", api.RestartPolicyOnFailure, api.PodSucceeded, ContainerStateTerminated, 1, false},
-				{"terminate-cmd-rpn", api.RestartPolicyNever, api.PodFailed, ContainerStateTerminated, 0, false},
+				{"terminate-cmd-rpa", v1.RestartPolicyAlways, v1.PodRunning, ContainerStateRunning, 2, true},
+				{"terminate-cmd-rpof", v1.RestartPolicyOnFailure, v1.PodSucceeded, ContainerStateTerminated, 1, false},
+				{"terminate-cmd-rpn", v1.RestartPolicyNever, v1.PodFailed, ContainerStateTerminated, 0, false},
 			}
 			for _, testCase := range testCases {
 
@@ -95,8 +95,8 @@ while true; do sleep 1; done
 				Container: testContainer,
 				RestartPolicy: testCase.RestartPolicy,
 				Volumes: testVolumes,
-				PodSecurityContext: &api.PodSecurityContext{
-					SELinuxOptions: &api.SELinuxOptions{
+				PodSecurityContext: &v1.PodSecurityContext{
+					SELinuxOptions: &v1.SELinuxOptions{
 						Level: "s0",
 					},
 				},
@@ -135,17 +135,17 @@ while true; do sleep 1; done
 			priv := true
 			c := ConformanceContainer{
 				PodClient: f.PodClient(),
-				Container: api.Container{
+				Container: v1.Container{
 					Image: "gcr.io/google_containers/busybox:1.24",
 					Name: name,
 					Command: []string{"/bin/sh", "-c"},
 					Args: []string{fmt.Sprintf("/bin/echo -n %s > %s", terminationMessage, terminationMessagePath)},
 					TerminationMessagePath: terminationMessagePath,
-					SecurityContext: &api.SecurityContext{
+					SecurityContext: &v1.SecurityContext{
 						Privileged: &priv,
 					},
 				},
-				RestartPolicy: api.RestartPolicyNever,
+				RestartPolicy: v1.RestartPolicyNever,
 			}
 
 			By("create the container")
@@ -153,7 +153,7 @@ while true; do sleep 1; done
 			defer c.Delete()
 
 			By("wait for the container to succeed")
-			Eventually(c.GetPhase, retryTimeout, pollInterval).Should(Equal(api.PodSucceeded))
+			Eventually(c.GetPhase, retryTimeout, pollInterval).Should(Equal(v1.PodSucceeded))
 
 			By("get the container status")
 			status, err := c.GetStatus()
@@ -181,55 +181,55 @@ while true; do sleep 1; done
 					}
 				}
 			}`
-			secret := &api.Secret{
-				Data: map[string][]byte{api.DockerConfigJsonKey: []byte(auth)},
-				Type: api.SecretTypeDockerConfigJson,
+			secret := &v1.Secret{
+				Data: map[string][]byte{v1.DockerConfigJsonKey: []byte(auth)},
+				Type: v1.SecretTypeDockerConfigJson,
 			}
 			// The following images are not added into NodeImageWhiteList, because this test is
 			// testing image pulling, these images don't need to be prepulled. The ImagePullPolicy
-			// is api.PullAlways, so it won't be blocked by framework image white list check.
+			// is v1.PullAlways, so it won't be blocked by framework image white list check.
 			for _, testCase := range []struct {
 				description string
 				image string
 				secret bool
-				phase api.PodPhase
+				phase v1.PodPhase
 				waiting bool
 			}{
 				{
 					description: "should not be able to pull image from invalid registry",
 					image: "invalid.com/invalid/alpine:3.1",
-					phase: api.PodPending,
+					phase: v1.PodPending,
 					waiting: true,
 				},
 				{
 					description: "should not be able to pull non-existing image from gcr.io",
 					image: "gcr.io/google_containers/invalid-image:invalid-tag",
-					phase: api.PodPending,
+					phase: v1.PodPending,
 					waiting: true,
 				},
 				{
 					description: "should be able to pull image from gcr.io",
 					image: "gcr.io/google_containers/alpine-with-bash:1.0",
-					phase: api.PodRunning,
+					phase: v1.PodRunning,
 					waiting: false,
 				},
 				{
 					description: "should be able to pull image from docker hub",
 					image: "alpine:3.1",
-					phase: api.PodRunning,
+					phase: v1.PodRunning,
 					waiting: false,
 				},
 				{
 					description: "should not be able to pull from private registry without secret",
 					image: "gcr.io/authenticated-image-pulling/alpine:3.1",
-					phase: api.PodPending,
+					phase: v1.PodPending,
 					waiting: true,
 				},
 				{
 					description: "should be able to pull from private registry with secret",
 					image: "gcr.io/authenticated-image-pulling/alpine:3.1",
 					secret: true,
-					phase: api.PodRunning,
+					phase: v1.PodRunning,
 					waiting: false,
 				},
 			} {
@@ -239,14 +239,14 @@ while true; do sleep 1; done
 			command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
 			container := ConformanceContainer{
 				PodClient: f.PodClient(),
-				Container: api.Container{
+				Container: v1.Container{
 					Name: name,
 					Image: testCase.image,
 					Command: command,
 					// PullAlways makes sure that the image will always be pulled even if it is present before the test.
-					ImagePullPolicy: api.PullAlways,
+					ImagePullPolicy: v1.PullAlways,
 				},
-				RestartPolicy: api.RestartPolicyNever,
+				RestartPolicy: v1.RestartPolicyNever,
 			}
 			if testCase.secret {
 				secret.Name = "image-pull-secret-" + string(uuid.NewUUID())

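Aside (not part of the diff): the image-pulling table above builds a docker-registry credential as a v1.Secret. A minimal sketch of that construction and of attaching it to a pod spec; the helper names are illustrative and the credential bytes are a placeholder.

package e2e_node

import "k8s.io/kubernetes/pkg/api/v1"

// newImagePullSecret wraps a .dockerconfigjson blob in the secret type the
// test above uses (hypothetical helper).
func newImagePullSecret(name string, dockerConfigJSON []byte) *v1.Secret {
	return &v1.Secret{
		ObjectMeta: v1.ObjectMeta{Name: name},
		Data:       map[string][]byte{v1.DockerConfigJsonKey: dockerConfigJSON},
		Type:       v1.SecretTypeDockerConfigJson,
	}
}

// withImagePullSecret references the secret from a pod spec so the kubelet
// can authenticate the pull (hypothetical helper).
func withImagePullSecret(spec v1.PodSpec, secretName string) v1.PodSpec {
	spec.ImagePullSecrets = append(spec.ImagePullSecrets, v1.LocalObjectReference{Name: secretName})
	return spec
}
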
@@ -19,9 +19,9 @@ package services
 import (
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
@@ -57,7 +57,7 @@ func (n *NamespaceController) Start() error {
 	}
 	clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
 	gvrFn := client.Discovery().ServerPreferredNamespacedResources
-	nc := namespacecontroller.NewNamespaceController(client, clientPool, gvrFn, ncResyncPeriod, api.FinalizerKubernetes)
+	nc := namespacecontroller.NewNamespaceController(client, clientPool, gvrFn, ncResyncPeriod, v1.FinalizerKubernetes)
 	go nc.Run(ncConcurrency, n.stopCh)
 	return nil
 }

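Aside (not part of the diff): the services package above now hands the namespace controller a release_1_5 clientset. A minimal sketch of constructing that clientset from a restclient.Config; the helper name and host URL are placeholders.

package services

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// newVersionedClient builds the same flavour of clientset the controller
// above receives (hypothetical helper).
func newVersionedClient() (clientset.Interface, error) {
	config := &restclient.Config{Host: "http://127.0.0.1:8080"} // placeholder API server address
	c, err := clientset.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return c, nil
}
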
@@ -17,8 +17,8 @@ limitations under the License.
 package e2e_node
 
 import (
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -30,20 +30,20 @@ var _ = framework.KubeDescribe("SimpleMount", func() {
 	// This is a very simple test that exercises the Kubelet's mounter code path.
 	// If the mount fails, the pod will not be able to run, and CreateSync will timeout.
 	It("should be able to mount an emptydir on a container", func() {
-		pod := &api.Pod{
+		pod := &v1.Pod{
 			TypeMeta: unversioned.TypeMeta{
 				Kind: "Pod",
 				APIVersion: "v1",
 			},
-			ObjectMeta: api.ObjectMeta{
+			ObjectMeta: v1.ObjectMeta{
 				Name: "simple-mount-pod",
 			},
-			Spec: api.PodSpec{
-				Containers: []api.Container{
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
 					{
 						Name: "simple-mount-container",
 						Image: framework.GetPauseImageNameForHostArch(),
-						VolumeMounts: []api.VolumeMount{
+						VolumeMounts: []v1.VolumeMount{
 							{
 								Name: "simply-mounted-volume",
 								MountPath: "/opt/",
@@ -51,11 +51,11 @@ var _ = framework.KubeDescribe("SimpleMount", func() {
 							},
 						},
 					},
-				Volumes: []api.Volume{
+				Volumes: []v1.Volume{
 					{
 						Name: "simply-mounted-volume",
-						VolumeSource: api.VolumeSource{
-							EmptyDir: &api.EmptyDirVolumeSource{
+						VolumeSource: v1.VolumeSource{
+							EmptyDir: &v1.EmptyDirVolumeSource{
 								Medium: "Memory",
 							},
 						},

@@ -20,9 +20,9 @@ import (
 	"fmt"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 	"k8s.io/kubernetes/test/e2e/framework"
 
@@ -211,39 +211,39 @@ var _ = framework.KubeDescribe("Summary API", func() {
 })
 
 func createSummaryTestPods(f *framework.Framework, names ...string) {
-	pods := make([]*api.Pod, 0, len(names))
+	pods := make([]*v1.Pod, 0, len(names))
 	for _, name := range names {
-		pods = append(pods, &api.Pod{
-			ObjectMeta: api.ObjectMeta{
+		pods = append(pods, &v1.Pod{
+			ObjectMeta: v1.ObjectMeta{
 				Name: name,
 			},
-			Spec: api.PodSpec{
-				RestartPolicy: api.RestartPolicyAlways,
-				Containers: []api.Container{
+			Spec: v1.PodSpec{
+				RestartPolicy: v1.RestartPolicyAlways,
+				Containers: []v1.Container{
 					{
 						Name: "busybox-container",
 						Image: "gcr.io/google_containers/busybox:1.24",
 						Command: []string{"sh", "-c", "ping -c 1 google.com; while true; do echo 'hello world' >> /test-empty-dir-mnt/file ; sleep 1; done"},
-						Resources: api.ResourceRequirements{
-							Limits: api.ResourceList{
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
 								// Must set memory limit to get MemoryStats.AvailableBytes
-								api.ResourceMemory: resource.MustParse("10M"),
+								v1.ResourceMemory: resource.MustParse("10M"),
 							},
 						},
-						VolumeMounts: []api.VolumeMount{
+						VolumeMounts: []v1.VolumeMount{
 							{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
 						},
 					},
 				},
-				SecurityContext: &api.PodSecurityContext{
-					SELinuxOptions: &api.SELinuxOptions{
+				SecurityContext: &v1.PodSecurityContext{
+					SELinuxOptions: &v1.SELinuxOptions{
 						Level: "s0",
 					},
 				},
-				Volumes: []api.Volume{
+				Volumes: []v1.Volume{
 					// TODO(#28393): Test secret volumes
 					// TODO(#28394): Test hostpath volumes
-					{Name: "test-empty-dir", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
+					{Name: "test-empty-dir", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
 				},
 			},
 		})

@@ -30,6 +30,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	k8serr "k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	v1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
 	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
@@ -84,7 +85,7 @@ func getCurrentKubeletConfig() (*componentconfig.KubeletConfiguration, error) {
 }
 
 // Queries the API server for a Kubelet configuration for the node described by framework.TestContext.NodeName
-func getCurrentKubeletConfigMap(f *framework.Framework) (*api.ConfigMap, error) {
+func getCurrentKubeletConfigMap(f *framework.Framework) (*v1.ConfigMap, error) {
 	return f.ClientSet.Core().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", framework.TestContext.NodeName))
 }
 
@@ -195,15 +196,15 @@ func decodeConfigz(resp *http.Response) (*componentconfig.KubeletConfiguration,
 }
 
 // Constructs a Kubelet ConfigMap targeting the current node running the node e2e tests
-func makeKubeletConfigMap(nodeName string, kubeCfg *componentconfig.KubeletConfiguration) *api.ConfigMap {
+func makeKubeletConfigMap(nodeName string, kubeCfg *componentconfig.KubeletConfiguration) *v1.ConfigMap {
 	kubeCfgExt := v1alpha1.KubeletConfiguration{}
 	api.Scheme.Convert(kubeCfg, &kubeCfgExt, nil)
 
 	bytes, err := json.Marshal(kubeCfgExt)
 	framework.ExpectNoError(err)
 
-	cmap := &api.ConfigMap{
-		ObjectMeta: api.ObjectMeta{
+	cmap := &v1.ConfigMap{
+		ObjectMeta: v1.ObjectMeta{
 			Name: fmt.Sprintf("kubelet-%s", nodeName),
 		},
 		Data: map[string]string{
@@ -214,7 +215,7 @@ func makeKubeletConfigMap(nodeName string, kubeCfg *componentconfig.KubeletConfi
 }
 
 // Uses KubeletConfiguration to create a `kubelet-<node-name>` ConfigMap in the "kube-system" namespace.
-func createConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*api.ConfigMap, error) {
+func createConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*v1.ConfigMap, error) {
 	cmap := makeKubeletConfigMap(framework.TestContext.NodeName, kubeCfg)
 	cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Create(cmap)
 	if err != nil {
@@ -224,7 +225,7 @@ func createConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletCon
 }
 
 // Similar to createConfigMap, except this updates an existing ConfigMap.
-func updateConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*api.ConfigMap, error) {
+func updateConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletConfiguration) (*v1.ConfigMap, error) {
 	cmap := makeKubeletConfigMap(framework.TestContext.NodeName, kubeCfg)
 	cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Update(cmap)
 	if err != nil {

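Aside (not part of the diff): makeKubeletConfigMap above round-trips a KubeletConfiguration into a v1.ConfigMap. A minimal sketch of the same idea; the data key is an assumption (the hunk is cut off before it), the helper name is illustrative, and error returns stand in for the framework assertions.

package e2e_node

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
)

// sketchKubeletConfigMap marshals the external KubeletConfiguration and wraps
// it in a node-scoped ConfigMap, mirroring makeKubeletConfigMap above.
func sketchKubeletConfigMap(nodeName string, kubeCfgExt v1alpha1.KubeletConfiguration) (*v1.ConfigMap, error) {
	raw, err := json.Marshal(kubeCfgExt)
	if err != nil {
		return nil, err
	}
	return &v1.ConfigMap{
		ObjectMeta: v1.ObjectMeta{Name: fmt.Sprintf("kubelet-%s", nodeName)},
		Data:       map[string]string{"kubelet.config": string(raw)}, // key name is an assumption
	}, nil
}
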
@@ -19,7 +19,7 @@ package e2e_node
 import (
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
 
@@ -35,24 +35,24 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
 	Context("On terminatation of pod with memory backed volume", func() {
 		It("should remove the volume from the node", func() {
 			var (
-				memoryBackedPod *api.Pod
+				memoryBackedPod *v1.Pod
 				volumeName string
 			)
 			By("Creating a pod with a memory backed volume that exits success without restart", func() {
 				volumeName = "memory-volume"
-				memoryBackedPod = f.PodClient().Create(&api.Pod{
-					ObjectMeta: api.ObjectMeta{
+				memoryBackedPod = f.PodClient().Create(&v1.Pod{
+					ObjectMeta: v1.ObjectMeta{
 						Name: "pod" + string(uuid.NewUUID()),
 						Namespace: f.Namespace.Name,
 					},
-					Spec: api.PodSpec{
-						RestartPolicy: api.RestartPolicyNever,
-						Containers: []api.Container{
+					Spec: v1.PodSpec{
+						RestartPolicy: v1.RestartPolicyNever,
+						Containers: []v1.Container{
 							{
 								Image: "gcr.io/google_containers/busybox:1.24",
 								Name: "container" + string(uuid.NewUUID()),
 								Command: []string{"sh", "-c", "echo"},
-								VolumeMounts: []api.VolumeMount{
+								VolumeMounts: []v1.VolumeMount{
 									{
 										Name: volumeName,
 										MountPath: "/tmp",
@@ -60,11 +60,11 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
 									},
 								},
 							},
-						Volumes: []api.Volume{
+						Volumes: []v1.Volume{
 							{
 								Name: volumeName,
-								VolumeSource: api.VolumeSource{
-									EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory},
+								VolumeSource: v1.VolumeSource{
+									EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
 								},
 							},
 						},
@@ -79,19 +79,19 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
 			for i := 0; i < 10; i++ {
 				// need to create a new verification pod on each pass since updates
 				//to the HostPath volume aren't propogated to the pod
-				pod := f.PodClient().Create(&api.Pod{
-					ObjectMeta: api.ObjectMeta{
+				pod := f.PodClient().Create(&v1.Pod{
+					ObjectMeta: v1.ObjectMeta{
 						Name: "pod" + string(uuid.NewUUID()),
 						Namespace: f.Namespace.Name,
 					},
-					Spec: api.PodSpec{
-						RestartPolicy: api.RestartPolicyNever,
-						Containers: []api.Container{
+					Spec: v1.PodSpec{
+						RestartPolicy: v1.RestartPolicyNever,
+						Containers: []v1.Container{
 							{
 								Image: "gcr.io/google_containers/busybox:1.24",
 								Name: "container" + string(uuid.NewUUID()),
 								Command: []string{"sh", "-c", "if [ -d " + volumePath + " ]; then exit 1; fi;"},
-								VolumeMounts: []api.VolumeMount{
+								VolumeMounts: []v1.VolumeMount{
 									{
 										Name: "kubelet-pods",
 										MountPath: "/tmp",
@@ -99,13 +99,13 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
 									},
 								},
 							},
-						Volumes: []api.Volume{
+						Volumes: []v1.Volume{
 							{
 								Name: "kubelet-pods",
-								VolumeSource: api.VolumeSource{
+								VolumeSource: v1.VolumeSource{
 									// TODO: remove hardcoded kubelet volume directory path
 									// framework.TestContext.KubeVolumeDir is currently not populated for node e2e
-									HostPath: &api.HostPathVolumeSource{Path: "/var/lib/kubelet/pods"},
+									HostPath: &v1.HostPathVolumeSource{Path: "/var/lib/kubelet/pods"},
 								},
 							},
 						},
@@ -113,7 +113,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
 				})
 				err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 				gp := int64(1)
-				f.PodClient().Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})
+				f.PodClient().Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})
 				if err == nil {
 					break
 				}

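Aside (not part of the diff): the volume-manager test above boils down to creating a pod whose emptyDir is backed by memory (tmpfs) and then checking that the kubelet unmounts it after the pod exits. A condensed sketch of that pod, with an illustrative helper name:

package e2e_node

import "k8s.io/kubernetes/pkg/api/v1"

// newMemoryBackedVolumePod condenses the pod created by the test above: a
// never-restarting busybox that mounts a tmpfs-backed emptyDir at /tmp and
// exits immediately (hypothetical helper).
func newMemoryBackedVolumePod(name, volumeName string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{
				{
					Name:    "container",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: []string{"sh", "-c", "echo"},
					VolumeMounts: []v1.VolumeMount{
						{Name: volumeName, MountPath: "/tmp"},
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name: volumeName,
					VolumeSource: v1.VolumeSource{
						EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
					},
				},
			},
		},
	}
}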