GetOptions - fix tests

pull/6/head
Wojciech Tyczynski 2016-12-07 15:40:26 +01:00
parent e8d1cba875
commit a9ec31209e
88 changed files with 398 additions and 338 deletions
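The change is mechanical throughout this commit: every typed clientset Get call in the e2e tests gains a metav1.GetOptions argument, and the metav1 import is added where it was missing. A minimal sketch of the resulting call shape follows; the helper name getPod and the choice of the Pod client are illustrative placeholders, not code from any single file in this commit.

package e2e

import (
	"k8s.io/kubernetes/pkg/api/v1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// getPod is an illustrative helper: Get(name) becomes Get(name, metav1.GetOptions{}).
// Passing an empty GetOptions keeps the previous default read behaviour.
func getPod(c clientset.Interface, ns, name string) (*v1.Pod, error) {
	return c.Core().Pods(ns).Get(name, metav1.GetOptions{})
}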

View File

@@ -26,6 +26,7 @@ import (
 "golang.org/x/crypto/ssh"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -329,11 +330,11 @@ var _ = framework.KubeDescribe("Addon update", func() {
 waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v2", false)
 By("verify invalid API addons weren't created")
-_, err = f.ClientSet.Core().ReplicationControllers(addonNsName).Get("invalid-addon-test-v1")
+_, err = f.ClientSet.Core().ReplicationControllers(addonNsName).Get("invalid-addon-test-v1", metav1.GetOptions{})
 Expect(err).To(HaveOccurred())
-_, err = f.ClientSet.Core().Services(addonNsName).Get("ivalid-addon-test")
+_, err = f.ClientSet.Core().Services(addonNsName).Get("ivalid-addon-test", metav1.GetOptions{})
 Expect(err).To(HaveOccurred())
-_, err = f.ClientSet.Core().Services(defaultNsName).Get("ivalid-addon-test-v2")
+_, err = f.ClientSet.Core().Services(defaultNsName).Get("ivalid-addon-test-v2", metav1.GetOptions{})
 Expect(err).To(HaveOccurred())
 // invalid addons will be deleted by the deferred function

View File

@@ -22,6 +22,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/util/intstr"
@@ -247,21 +248,21 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 func (rc *ResourceConsumer) GetReplicas() int {
 switch rc.kind {
 case kindRC:
-replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name)
+replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 if replicationController == nil {
 framework.Failf(rcIsNil)
 }
 return int(replicationController.Status.Replicas)
 case kindDeployment:
-deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name)
+deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 if deployment == nil {
 framework.Failf(deploymentIsNil)
 }
 return int(deployment.Status.Replicas)
 case kindReplicaSet:
-rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name)
+rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 if rs == nil {
 framework.Failf(rsIsNil)

View File

@@ -28,6 +28,7 @@ import (
 "k8s.io/kubernetes/pkg/api/v1"
 batchinternal "k8s.io/kubernetes/pkg/apis/batch"
 batch "k8s.io/kubernetes/pkg/apis/batch/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/kubectl"
 "k8s.io/kubernetes/pkg/labels"
@@ -279,7 +280,7 @@ func newTestV1Job(behavior, name string, rPol v1.RestartPolicy, parallelism, com
 }
 func getV1Job(c clientset.Interface, ns, name string) (*batch.Job, error) {
-return c.Batch().Jobs(ns).Get(name)
+return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
 }
 func createV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
@@ -316,7 +317,7 @@ func waitForAllPodsRunningV1(c clientset.Interface, ns, jobName string, parallel
 // Wait for job to reach completions.
 func waitForV1JobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
 return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
-curr, err := c.Batch().Jobs(ns).Get(jobName)
+curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -327,7 +328,7 @@ func waitForV1JobFinish(c clientset.Interface, ns, jobName string, completions i
 // Wait for job fail.
 func waitForV1JobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
 return wait.Poll(framework.Poll, timeout, func() (bool, error) {
-curr, err := c.Batch().Jobs(ns).Get(jobName)
+curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }

View File

@@ -25,6 +25,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -92,7 +93,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 // being run as the first e2e test just after the e2e cluster has been created.
 var err error
 for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
-if _, err = s.Get("elasticsearch-logging"); err == nil {
+if _, err = s.Get("elasticsearch-logging", metav1.GetOptions{}); err == nil {
 break
 }
 framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))

View File

@@ -23,6 +23,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/test/e2e/framework"
 )
@@ -62,7 +63,7 @@ func createSynthLogger(f *framework.Framework, linesCount int) {
 }
 func reportLogsFromFluentdPod(f *framework.Framework) error {
-synthLoggerPod, err := f.PodClient().Get(synthLoggerPodName)
+synthLoggerPod, err := f.PodClient().Get(synthLoggerPodName, metav1.GetOptions{})
 if err != nil {
 return fmt.Errorf("Failed to get synth logger pod due to %v", err)
 }

View File

@@ -27,6 +27,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/util/sets"
@@ -240,7 +241,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
 registeredNodes := sets.NewString()
 for nodeName := range newNodesSet {
-node, err := f.ClientSet.Core().Nodes().Get(nodeName)
+node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 if err == nil && node != nil {
 registeredNodes.Insert(nodeName)
 } else {

View File

@@ -21,6 +21,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/util/intstr"
 "k8s.io/kubernetes/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -50,7 +51,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
 p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
 f.WaitForPodReady(p.Name)
-p, err := podClient.Get(p.Name)
+p, err := podClient.Get(p.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 isReady, err := testutils.PodRunningReady(p)
 framework.ExpectNoError(err)
@@ -76,14 +77,14 @@ var _ = framework.KubeDescribe("Probing container", func() {
 It("with readiness probe that fails should never be ready and never restart [Conformance]", func() {
 p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
 Consistently(func() (bool, error) {
-p, err := podClient.Get(p.Name)
+p, err := podClient.Get(p.Name, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
 return v1.IsPodReady(p), nil
 }, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")
-p, err := podClient.Get(p.Name)
+p, err := podClient.Get(p.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 isReady, err := testutils.PodRunningReady(p)
@@ -366,7 +367,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
 // Check the pod's current state and verify that restartCount is present.
 By("checking the pod's current state and verifying that restartCount is present")
-pod, err := podClient.Get(pod.Name)
+pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
 initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
 framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
@@ -376,7 +377,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
 lastRestartCount := initialRestartCount
 observedRestarts := int32(0)
 for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
-pod, err = podClient.Get(pod.Name)
+pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
 restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
 if restartCount != lastRestartCount {

View File

@@ -22,6 +22,7 @@ import (
 "k8s.io/kubernetes/pkg/api/resource"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -133,7 +134,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
 By("Creating the pod")
 podClient.CreateSync(pod)
-pod, err := podClient.Get(pod.Name)
+pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
 Eventually(func() (string, error) {

View File

@@ -27,6 +27,7 @@ import (
 "golang.org/x/net/websocket"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/kubelet"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/intstr"
@@ -54,7 +55,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
 hostIPTimeout := 2 * time.Minute
 t := time.Now()
 for {
-p, err := podClient.Get(pod.Name)
+p, err := podClient.Get(pod.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
 if p.Status.HostIP != "" {
 framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
@@ -100,7 +101,7 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa
 beginTime := time.Now()
 for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
 time.Sleep(time.Second)
-pod, err := podClient.Get(podName)
+pod, err := podClient.Get(podName, metav1.GetOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
 status, ok := v1.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
 if !ok {
@@ -201,7 +202,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 // may be carried out immediately rather than gracefully.
 framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
 // save the running pod
-pod, err = podClient.Get(pod.Name)
+pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
 framework.Logf("running pod: %#v", pod)

View File

@@ -20,6 +20,7 @@ import (
 "fmt"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/kubelet/events"
 "k8s.io/kubernetes/pkg/kubelet/sysctl"
 "k8s.io/kubernetes/pkg/util/uuid"
@@ -107,7 +108,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
 By("Waiting for pod completion")
 err = f.WaitForPodNoLongerRunning(pod.Name)
 Expect(err).NotTo(HaveOccurred())
-pod, err = podClient.Get(pod.Name)
+pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 By("Checking that the pod succeeded")
@@ -148,7 +149,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
 By("Waiting for pod completion")
 err = f.WaitForPodNoLongerRunning(pod.Name)
 Expect(err).NotTo(HaveOccurred())
-pod, err = podClient.Get(pod.Name)
+pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 By("Checking that the pod succeeded")

View File

@@ -148,7 +148,7 @@ func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *v1.Pod
 serverPod = podClient.CreateSync(serverPod)
 By("locating the server pod")
-pod, err := podClient.Get(serverPod.Name)
+pod, err := podClient.Get(serverPod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err)
 By("sleeping a bit to give the server time to start")

View File

@@ -26,6 +26,7 @@ import (
 "k8s.io/kubernetes/pkg/api/v1"
 batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
 batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/controller/job"
 "k8s.io/kubernetes/pkg/runtime/schema"
@@ -235,7 +236,7 @@ func createCronJob(c clientset.Interface, ns string, cronJob *batch.CronJob) (*b
 }
 func getCronJob(c clientset.Interface, ns, name string) (*batch.CronJob, error) {
-return c.BatchV2alpha1().CronJobs(ns).Get(name)
+return c.BatchV2alpha1().CronJobs(ns).Get(name, metav1.GetOptions{})
 }
 func deleteCronJob(c clientset.Interface, ns, name string) error {
@@ -245,7 +246,7 @@ func deleteCronJob(c clientset.Interface, ns, name string) error {
 // Wait for at least given amount of active jobs.
 func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
 return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
-curr, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName)
+curr, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -256,7 +257,7 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int
 // Wait for no jobs to appear.
 func waitForNoJobs(c clientset.Interface, ns, jobName string) error {
 return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
-curr, err := c.BatchV2alpha1().CronJobs(ns).Get(jobName)
+curr, err := c.BatchV2alpha1().CronJobs(ns).Get(jobName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -312,7 +313,7 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
 // checkNoUnexpectedEvents checks unexpected events didn't happen.
 // Currently only "UnexpectedJob" is checked.
 func checkNoUnexpectedEvents(c clientset.Interface, ns, cronJobName string) error {
-sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName)
+sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
 if err != nil {
 return fmt.Errorf("error in getting cronjob %s/%s: %v", ns, cronJobName, err)
 }

View File

@@ -303,7 +303,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
 var newNode *v1.Node
 var newLabels map[string]string
 err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
-node, err := nodeClient.Get(nodeName)
+node, err := nodeClient.Get(nodeName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -385,7 +385,7 @@ func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) f
 }
 func checkDaemonStatus(f *framework.Framework, dsName string) error {
-ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName)
+ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
 if err != nil {
 return fmt.Errorf("Could not get daemon set from v1.")
 }

View File

@@ -150,7 +150,7 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
 // checkDeploymentRevision checks if the input deployment's and its new replica set's revision and images are as expected.
 func checkDeploymentRevision(c clientset.Interface, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 // Check revision of the new replica set of this deployment
 newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
@@ -180,7 +180,7 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte
 }
 func stopDeploymentMaybeOverlap(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName, overlapWith string) {
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 framework.Logf("Deleting deployment %s", deploymentName)
@@ -191,7 +191,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, internalClient internalcl
 Expect(err).NotTo(HaveOccurred())
 framework.Logf("Ensuring deployment %s was deleted", deploymentName)
-_, err = c.Extensions().Deployments(ns).Get(deployment.Name)
+_, err = c.Extensions().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
 Expect(err).To(HaveOccurred())
 Expect(errors.IsNotFound(err)).To(BeTrue())
 framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
@@ -259,7 +259,7 @@ func testNewDeployment(f *framework.Framework) {
 err = framework.WaitForDeploymentStatus(c, deploy)
 Expect(err).NotTo(HaveOccurred())
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
 Expect(err).NotTo(HaveOccurred())
@@ -291,7 +291,7 @@ func testDeleteDeployment(f *framework.Framework) {
 err = framework.WaitForDeploymentStatus(c, deploy)
 Expect(err).NotTo(HaveOccurred())
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
 Expect(err).NotTo(HaveOccurred())
@@ -337,7 +337,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 Expect(err).NotTo(HaveOccurred())
 // There should be 1 old RS (nginx-controller, which is adopted)
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
 Expect(err).NotTo(HaveOccurred())
@@ -388,7 +388,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) {
 err = framework.WaitForDeploymentStatus(c, deploy)
 Expect(err).NotTo(HaveOccurred())
 // Verify that the pods were scaled up and down as expected. We use events to verify that.
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 framework.WaitForEvents(c, ns, deployment, 2)
 events, err := c.Core().Events(ns).Search(deployment)
@@ -441,7 +441,7 @@ func testRecreateDeployment(f *framework.Framework) {
 Expect(err).NotTo(HaveOccurred())
 // Verify that the pods were scaled up and down as expected. We use events to verify that.
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 framework.WaitForEvents(c, ns, deployment, 2)
 events, err := c.Core().Events(ns).Search(deployment)
@@ -572,7 +572,7 @@ func testRolloverDeployment(f *framework.Framework) {
 Expect(err).NotTo(HaveOccurred())
 // Verify that the pods were scaled up and down as expected.
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
 // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
@@ -619,7 +619,7 @@ func testPausedDeployment(f *framework.Framework) {
 _, err := c.Extensions().Deployments(ns).Create(d)
 Expect(err).NotTo(HaveOccurred())
 // Check that deployment is created fine.
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 // Verify that there is no latest state realized for the new deployment.
@@ -846,7 +846,7 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
 Expect(err).NotTo(HaveOccurred())
 // Check that the replica set we created still doesn't contain revision information
-rs, err = c.Extensions().ReplicaSets(ns).Get(rsName)
+rs, err = c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 Expect(rs.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))
@@ -976,7 +976,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
 Expect(err).NotTo(HaveOccurred())
 // There should be no old RSs (overlapping RS)
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c)
 Expect(err).NotTo(HaveOccurred())
@@ -1011,7 +1011,7 @@ func testScalePausedDeployment(f *framework.Framework) {
 Expect(err).NotTo(HaveOccurred())
 // Check that deployment is created fine.
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
@@ -1091,7 +1091,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
 Expect(err).NotTo(HaveOccurred())
-deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
@@ -1102,7 +1102,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 second, err := deploymentutil.GetNewReplicaSet(deployment, c)
 Expect(err).NotTo(HaveOccurred())
-first, err = c.Extensions().ReplicaSets(first.Namespace).Get(first.Name)
+first, err = c.Extensions().ReplicaSets(first.Namespace).Get(first.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 firstCond := replicaSetHasDesiredReplicas(c.Extensions(), first)
@@ -1151,7 +1151,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
 Expect(err).NotTo(HaveOccurred())
-deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
@@ -1159,7 +1159,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 }
 By(fmt.Sprintf("Checking that the replica sets for %q are synced", deploymentName))
-oldRs, err := c.Extensions().ReplicaSets(rs.Namespace).Get(rs.Name)
+oldRs, err := c.Extensions().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 newRs, err := deploymentutil.GetNewReplicaSet(deployment, c)
@@ -1459,7 +1459,7 @@ func testIterativeDeployments(f *framework.Framework) {
 }
 // unpause the deployment if we end up pausing it
-deployment, err = c.Extensions().Deployments(ns).Get(deployment.Name)
+deployment, err = c.Extensions().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 if deployment.Spec.Paused {
 deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
@@ -1480,7 +1480,7 @@ func testIterativeDeployments(f *framework.Framework) {
 func replicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) wait.ConditionFunc {
 desiredGeneration := replicaSet.Generation
 return func() (bool, error) {
-rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name)
+rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
} }

View File

@@ -237,7 +237,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
 framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
 By("retrieving the pod")
-pod, err := podClient.Get(pod.Name)
+pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed to get pod %s: %v", pod.Name, err)
 }
@@ -266,7 +266,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
 framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
 By("retrieving the pod")
-pod, err := podClient.Get(pod.Name)
+pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed to get pod %s: %v", pod.Name, err)
 }

View File

@@ -26,6 +26,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/resource"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -230,7 +231,7 @@ func getScheduableCores(nodes []v1.Node) int64 {
 }
 func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
-cm, err := c.Core().ConfigMaps(api.NamespaceSystem).Get(DNSAutoscalerLabelName)
+cm, err := c.Core().ConfigMaps(api.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
 if err != nil {
 return nil, err
 }

View File

@@ -22,6 +22,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/uuid"
@@ -79,7 +80,7 @@ var _ = framework.KubeDescribe("Events", func() {
 Expect(len(pods.Items)).To(Equal(1))
 By("retrieving the pod")
-podWithUid, err := podClient.Get(pod.Name)
+podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed to get pod: %v", err)
 }

View File

@@ -27,6 +27,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -398,7 +399,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 err := framework.WaitForPodNameRunningInNamespace(c, podName, ns)
 Expect(err).NotTo(HaveOccurred())
 for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
-pod, err := c.Core().Pods(ns).Get(podName)
+pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
 stat := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
 framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)

View File

@@ -29,6 +29,7 @@ import (
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/util/intstr"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -227,7 +228,7 @@ func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, cluste
 // ingress should be present in underlying clusters unless orphanDependents is false.
 shouldExist := orphanDependents == nil || *orphanDependents == true
 for clusterName, clusterClientset := range clusters {
-_, err := clusterClientset.Extensions().Ingresses(nsName).Get(ingressName)
+_, err := clusterClientset.Extensions().Ingresses(nsName).Get(ingressName, metav1.GetOptions{})
 if shouldExist && errors.IsNotFound(err) {
 errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for ingress %s in cluster %s, expected ingress to exist", ingressName, clusterName))
 } else if !shouldExist && !errors.IsNotFound(err) {
@@ -247,7 +248,7 @@ func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string,
 By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
 var clusterIngress *v1beta1.Ingress
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-clusterIngress, err := clientset.Ingresses(namespace).Get(ingress.Name)
+clusterIngress, err := clientset.Ingresses(namespace).Get(ingress.Name, metav1.GetOptions{})
 if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
 By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is absent", ingress.Name, namespace))
 return true, nil // Success
@@ -293,7 +294,7 @@ func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingres
 func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace string, ingress *v1beta1.Ingress, timeout time.Duration) {
 By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-clusterIngress, err := clientset.Ingresses(namespace).Get(ingress.Name)
+clusterIngress, err := clientset.Ingresses(namespace).Get(ingress.Name, metav1.GetOptions{})
 if err == nil { // We want it present, and the Get succeeded, so we're all good.
 if equivalentIngress(*clusterIngress, *ingress) {
 By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is updated", ingress.Name, namespace))
@@ -326,7 +327,7 @@ func deleteIngressOrFail(clientset *fedclientset.Clientset, namespace string, in
 framework.ExpectNoError(err, "Error deleting ingress %q from namespace %q", ingressName, namespace)
 // Wait for the ingress to be deleted.
 err = wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
-_, err := clientset.Extensions().Ingresses(namespace).Get(ingressName)
+_, err := clientset.Extensions().Ingresses(namespace).Get(ingressName, metav1.GetOptions{})
 if err != nil && errors.IsNotFound(err) {
 return true, nil
 }
@@ -456,7 +457,7 @@ func waitForFederatedIngressAddress(c *fedclientset.Clientset, ns, ingName strin
 // waitForFederatedIngressExists waits for the Ingress object exists.
 func waitForFederatedIngressExists(c *fedclientset.Clientset, ns, ingName string, timeout time.Duration) error {
 err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
-_, err := c.Extensions().Ingresses(ns).Get(ingName)
+_, err := c.Extensions().Ingresses(ns).Get(ingName, metav1.GetOptions{})
 if err != nil {
 framework.Logf("Waiting for Ingress %v, error %v", ingName, err)
 return false, nil
@@ -468,7 +469,7 @@ func waitForFederatedIngressExists(c *fedclientset.Clientset, ns, ingName string
 // getFederatedIngressAddress returns the ips/hostnames associated with the Ingress.
 func getFederatedIngressAddress(client *fedclientset.Clientset, ns, name string) ([]string, error) {
-ing, err := client.Extensions().Ingresses(ns).Get(name)
+ing, err := client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
 if err != nil {
 return nil, err
 }

View File

@@ -26,6 +26,7 @@ import (
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/v1"
 api_v1 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -132,7 +133,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
 f.FederationClientset_1_5.Core().Namespaces().Delete)
 By(fmt.Sprintf("Verify that event %s was deleted as well", event.Name))
-latestEvent, err := f.FederationClientset_1_5.Core().Events(nsName).Get(event.Name)
+latestEvent, err := f.FederationClientset_1_5.Core().Events(nsName).Get(event.Name, metav1.GetOptions{})
 if !errors.IsNotFound(err) {
 framework.Failf("Event %s should have been deleted. Found: %v", event.Name, latestEvent)
 }
@@ -150,7 +151,7 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters m
 By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName))
 err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
 for _, cluster := range clusters {
-_, err := cluster.Core().Namespaces().Get(nsName)
+_, err := cluster.Core().Namespaces().Get(nsName, metav1.GetOptions{})
 if err != nil && !errors.IsNotFound(err) {
 return false, err
 }
@@ -170,7 +171,7 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters m
 // namespace should be present in underlying clusters unless orphanDependents is false.
 shouldExist := orphanDependents == nil || *orphanDependents == true
 for clusterName, clusterClientset := range clusters {
-_, err := clusterClientset.Core().Namespaces().Get(nsName)
+_, err := clusterClientset.Core().Namespaces().Get(nsName, metav1.GetOptions{})
 if shouldExist && errors.IsNotFound(err) {
 errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for namespace %s in cluster %s, expected namespace to exist", nsName, clusterName))
 } else if !shouldExist && !errors.IsNotFound(err) {

View File

@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -116,7 +117,7 @@ func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, cluster
By(fmt.Sprintf("Waiting for secret %s to be created in all underlying clusters", secretName)) By(fmt.Sprintf("Waiting for secret %s to be created in all underlying clusters", secretName))
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) { err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
for _, cluster := range clusters { for _, cluster := range clusters {
_, err := cluster.Core().Secrets(nsName).Get(secretName) _, err := cluster.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
if err != nil { if err != nil {
if !errors.IsNotFound(err) { if !errors.IsNotFound(err) {
return false, err return false, err
@ -136,7 +137,7 @@ func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, cluster
// secret should be present in underlying clusters unless orphanDependents is false. // secret should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents == true shouldExist := orphanDependents == nil || *orphanDependents == true
for clusterName, clusterClientset := range clusters { for clusterName, clusterClientset := range clusters {
-_, err := clusterClientset.Core().Secrets(nsName).Get(secretName)
+_, err := clusterClientset.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
 if shouldExist && errors.IsNotFound(err) {
 errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for secret %s in cluster %s, expected secret to exist", secretName, clusterName))
 } else if !shouldExist && !errors.IsNotFound(err) {
@@ -173,7 +174,7 @@ func deleteSecretOrFail(clientset *fedclientset.Clientset, nsName string, secret
 // Wait for the secret to be deleted.
 err = wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-_, err := clientset.Core().Secrets(nsName).Get(secretName)
+_, err := clientset.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
 if err != nil && errors.IsNotFound(err) {
 return true, nil
 }
@@ -191,7 +192,7 @@ func updateSecretOrFail(clientset *fedclientset.Clientset, nsName string, secret
 var newSecret *v1.Secret
 for retryCount := 0; retryCount < MaxRetries; retryCount++ {
-secret, err := clientset.Core().Secrets(nsName).Get(secretName)
+secret, err := clientset.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
 if err != nil {
 framework.Failf("failed to get secret %q: %v", secretName, err)
 }
@@ -223,7 +224,7 @@ func waitForSecretOrFail(clientset *kubeclientset.Clientset, nsName string, secr
 By(fmt.Sprintf("Fetching a federated secret shard of secret %q in namespace %q from cluster", secret.Name, nsName))
 var clusterSecret *v1.Secret
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-clusterSecret, err := clientset.Core().Secrets(nsName).Get(secret.Name)
+clusterSecret, err := clientset.Core().Secrets(nsName).Get(secret.Name, metav1.GetOptions{})
 if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
 By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is absent", secret.Name, nsName))
 return true, nil // Success
@@ -252,7 +253,7 @@ func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters
 func waitForSecretUpdateOrFail(clientset *kubeclientset.Clientset, nsName string, secret *v1.Secret, timeout time.Duration) {
 By(fmt.Sprintf("Fetching a federated secret shard of secret %q in namespace %q from cluster", secret.Name, nsName))
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-clusterSecret, err := clientset.Core().Secrets(nsName).Get(secret.Name)
+clusterSecret, err := clientset.Core().Secrets(nsName).Get(secret.Name, metav1.GetOptions{})
 if err == nil { // We want it present, and the Get succeeded, so we're all good.
 if util.SecretEquivalent(*clusterSecret, *secret) {
 By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is updated", secret.Name, nsName))

View File

@@ -25,6 +25,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/test/e2e/framework"
 . "github.com/onsi/ginkgo"
@@ -129,7 +130,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 By(fmt.Sprintf("Deletion of service %q in namespace %q succeeded.", service.Name, nsName))
 By(fmt.Sprintf("Verifying that services in underlying clusters are not deleted"))
 for clusterName, clusterClientset := range clusters {
-_, err := clusterClientset.Core().Services(service.Namespace).Get(service.Name)
+_, err := clusterClientset.Core().Services(service.Namespace).Get(service.Name, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Unexpected error in fetching service %s in cluster %s, %s", service.Name, clusterName, err)
 }

View File

@@ -24,6 +24,7 @@ import (
 federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
 "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -124,7 +125,7 @@ func newService(name, namespace string) *v1.Service {
 // Verify that the cluster is marked ready.
 func isReady(clusterName string, clientset *federation_release_1_5.Clientset) error {
 return wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
-c, err := clientset.Federation().Clusters().Get(clusterName)
+c, err := clientset.Federation().Clusters().Get(clusterName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }

View File

@@ -28,6 +28,7 @@ import (
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -120,7 +121,7 @@ func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters ma
 By(fmt.Sprintf("Waiting for daemonset %s to be created in all underlying clusters", daemonsetName))
 err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
 for _, cluster := range clusters {
-_, err := cluster.Extensions().DaemonSets(nsName).Get(daemonsetName)
+_, err := cluster.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
 if err != nil && errors.IsNotFound(err) {
 return false, nil
 }
@@ -140,7 +141,7 @@ func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters ma
 // daemon set should be present in underlying clusters unless orphanDependents is false.
 shouldExist := orphanDependents == nil || *orphanDependents == true
 for clusterName, clusterClientset := range clusters {
-_, err := clusterClientset.Extensions().DaemonSets(nsName).Get(daemonsetName)
+_, err := clusterClientset.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
 if shouldExist && errors.IsNotFound(err) {
 errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for daemonset %s in cluster %s, expected daemonset to exist", daemonsetName, clusterName))
 } else if !shouldExist && !errors.IsNotFound(err) {
@@ -193,7 +194,7 @@ func deleteDaemonSetOrFail(clientset *fedclientset.Clientset, nsName string, dae
 // Wait for the daemonset to be deleted.
 err = wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-_, err := clientset.Extensions().DaemonSets(nsName).Get(daemonsetName)
+_, err := clientset.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
 if err != nil && errors.IsNotFound(err) {
 return true, nil
 }
@@ -211,7 +212,7 @@ func updateDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string)
 var newDaemonSet *v1beta1.DaemonSet
 for retryCount := 0; retryCount < FederatedDaemonSetMaxRetries; retryCount++ {
-daemonset, err := clientset.Extensions().DaemonSets(namespace).Get(FederatedDaemonSetName)
+daemonset, err := clientset.Extensions().DaemonSets(namespace).Get(FederatedDaemonSetName, metav1.GetOptions{})
 if err != nil {
 framework.Failf("failed to get daemonset %q: %v", FederatedDaemonSetName, err)
 }
@@ -241,7 +242,7 @@ func waitForDaemonSetOrFail(clientset *kubeclientset.Clientset, namespace string
 By(fmt.Sprintf("Fetching a federated daemonset shard of daemonset %q in namespace %q from cluster", daemonset.Name, namespace))
 var clusterDaemonSet *v1beta1.DaemonSet
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-clusterDaemonSet, err := clientset.Extensions().DaemonSets(namespace).Get(daemonset.Name)
+clusterDaemonSet, err := clientset.Extensions().DaemonSets(namespace).Get(daemonset.Name, metav1.GetOptions{})
 if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
 By(fmt.Sprintf("Success: shard of federated daemonset %q in namespace %q in cluster is absent", daemonset.Name, namespace))
 return true, nil // Success
@@ -270,7 +271,7 @@ func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.Da
 func waitForDaemonSetUpdateOrFail(clientset *kubeclientset.Clientset, namespace string, daemonset *v1beta1.DaemonSet, timeout time.Duration) {
 By(fmt.Sprintf("Fetching a federated daemonset shard of daemonset %q in namespace %q from cluster", daemonset.Name, namespace))
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-clusterDaemonSet, err := clientset.Extensions().DaemonSets(namespace).Get(daemonset.Name)
+clusterDaemonSet, err := clientset.Extensions().DaemonSets(namespace).Get(daemonset.Name, metav1.GetOptions{})
 if err == nil { // We want it present, and the Get succeeded, so we're all good.
 if util.ObjectMetaAndSpecEquivalent(clusterDaemonSet, daemonset) {
 By(fmt.Sprintf("Success: shard of federated daemonset %q in namespace %q in cluster is updated", daemonset.Name, namespace))

View File

@@ -154,7 +154,7 @@ func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clu
 By(fmt.Sprintf("Waiting for deployment %s to be created in all underlying clusters", deploymentName))
 err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
 for _, cluster := range clusters {
-_, err := cluster.Extensions().Deployments(nsName).Get(deploymentName)
+_, err := cluster.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
 if err != nil && errors.IsNotFound(err) {
 return false, nil
 }
@@ -174,7 +174,7 @@ func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clu
 // deployment should be present in underlying clusters unless orphanDependents is false.
 shouldExist := orphanDependents == nil || *orphanDependents == true
 for clusterName, clusterClientset := range clusters {
-_, err := clusterClientset.Extensions().Deployments(nsName).Get(deploymentName)
+_, err := clusterClientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
 if shouldExist && errors.IsNotFound(err) {
 errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for deployment %s in cluster %s, expected deployment to exist", deploymentName, clusterName))
 } else if !shouldExist && !errors.IsNotFound(err) {
@@ -193,13 +193,13 @@ func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploy
 func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters map[string]*cluster) error {
 err := wait.Poll(10*time.Second, FederatedDeploymentTimeout, func() (bool, error) {
-fdep, err := c.Deployments(namespace).Get(deploymentName)
+fdep, err := c.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
 specReplicas, statusReplicas := int32(0), int32(0)
 for _, cluster := range clusters {
-dep, err := cluster.Deployments(namespace).Get(deploymentName)
+dep, err := cluster.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
 if err != nil && !errors.IsNotFound(err) {
 By(fmt.Sprintf("Failed getting deployment: %q/%q/%q, err: %v", cluster.name, namespace, deploymentName, err))
 return false, err
@@ -265,7 +265,7 @@ func deleteDeploymentOrFail(clientset *fedclientset.Clientset, nsName string, de
 // Wait for the deployment to be deleted.
 err = wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-_, err := clientset.Extensions().Deployments(nsName).Get(deploymentName)
+_, err := clientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
 if err != nil && errors.IsNotFound(err) {
 return true, nil
 }
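Illustrative sketch, not part of the patch: the wait-for-deletion loops in this file share one shape. A hypothetical helper using the updated Get signature could look like this; the function name and parameters are placeholders, and fedclientset is assumed to be the federation clientset alias seen in the hunk headers.

    // waitForDeploymentGone polls until the federated deployment is NotFound.
    func waitForDeploymentGone(c *fedclientset.Clientset, ns, name string) error {
        return wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
            _, err := c.Extensions().Deployments(ns).Get(name, metav1.GetOptions{})
            if errors.IsNotFound(err) {
                return true, nil // deleted
            }
            return false, err // still present (err == nil) or a real error
        })
    }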

View File

@@ -156,7 +156,7 @@ func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clu
 By(fmt.Sprintf("Waiting for replica sets %s to be created in all underlying clusters", replicaSetName))
 err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
 for _, cluster := range clusters {
-_, err := cluster.Extensions().ReplicaSets(nsName).Get(replicaSetName)
+_, err := cluster.Extensions().ReplicaSets(nsName).Get(replicaSetName, metav1.GetOptions{})
 if err != nil && errors.IsNotFound(err) {
 return false, nil
 }
@@ -174,7 +174,7 @@ func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clu
 By(fmt.Sprintf("Verifying replica sets %s in underlying clusters", replicaSetName))
 errMessages := []string{}
 for clusterName, clusterClientset := range clusters {
-_, err := clusterClientset.Extensions().ReplicaSets(nsName).Get(replicaSetName)
+_, err := clusterClientset.Extensions().ReplicaSets(nsName).Get(replicaSetName, metav1.GetOptions{})
 if (orphanDependents == nil || *orphanDependents == true) && errors.IsNotFound(err) {
 errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for replica set %s in cluster %s, expected replica set to exist", replicaSetName, clusterName))
 } else if (orphanDependents != nil && *orphanDependents == false) && (err == nil || !errors.IsNotFound(err)) {
@@ -193,13 +193,13 @@ func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replic
 func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster) error {
 err := wait.Poll(10*time.Second, FederatedReplicaSetTimeout, func() (bool, error) {
-frs, err := c.ReplicaSets(namespace).Get(replicaSetName)
+frs, err := c.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
 specReplicas, statusReplicas := int32(0), int32(0)
 for _, cluster := range clusters {
-rs, err := cluster.ReplicaSets(namespace).Get(replicaSetName)
+rs, err := cluster.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
 if err != nil && !errors.IsNotFound(err) {
 By(fmt.Sprintf("Failed getting replicaset: %q/%q/%q, err: %v", cluster.name, namespace, replicaSetName, err))
 return false, err
@@ -251,7 +251,7 @@ func deleteReplicaSetOrFail(clientset *fedclientset.Clientset, nsName string, re
 // Wait for the replicaSet to be deleted.
 err = wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-_, err := clientset.Extensions().ReplicaSets(nsName).Get(replicaSetName)
+_, err := clientset.Extensions().ReplicaSets(nsName).Get(replicaSetName, metav1.GetOptions{})
 if err != nil && errors.IsNotFound(err) {
 return true, nil
 }

View File

@@ -22,13 +22,14 @@ import (
 federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
 apiv1 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
 )
 func createClusterObjectOrFail_14(f *framework.Framework, context *framework.E2EContext) {
 framework.Logf("Looking up cluster: %s", context.Name)
-foundCluster, err := f.FederationClientset_1_5.Federation().Clusters().Get(context.Name)
+foundCluster, err := f.FederationClientset_1_5.Federation().Clusters().Get(context.Name, metav1.GetOptions{})
 if err == nil && foundCluster != nil {
 return
 }
@@ -68,7 +69,7 @@ func buildClustersOrFail_14(f *framework.Framework) []*federationapi.Cluster {
 // Wait for all clusters to become ready for up to 5 min.
 if err := wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
 for _, context := range contexts {
-cluster, err := f.FederationClientset_1_5.Federation().Clusters().Get(context.Name)
+cluster, err := f.FederationClientset_1_5.Federation().Clusters().Get(context.Name, metav1.GetOptions{})
 if err != nil {
 return false, err
 }

View File

@@ -27,6 +27,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
@@ -85,7 +86,7 @@ func createClusterObjectOrFail(f *framework.Framework, context *framework.E2ECon
 }
 func clusterIsReadyOrFail(f *framework.Framework, context *framework.E2EContext) {
-c, err := f.FederationClientset_1_5.Federation().Clusters().Get(context.Name)
+c, err := f.FederationClientset_1_5.Federation().Clusters().Get(context.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("get cluster: %+v", err))
 if c.ObjectMeta.Name != context.Name {
 framework.Failf("cluster name does not match input context: actual=%+v, expected=%+v", c, context)
@@ -140,7 +141,7 @@ func createNamespaceInClusters(clusters map[string]*cluster, f *framework.Framew
 for name, c := range clusters {
 // The e2e Framework created the required namespace in federation control plane, but we need to create it in all the others, if it doesn't yet exist.
 // TODO(nikhiljindal): remove this once we have the namespace controller working as expected.
-if _, err := c.Clientset.Core().Namespaces().Get(nsName); errors.IsNotFound(err) {
+if _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); errors.IsNotFound(err) {
 ns := &v1.Namespace{
 ObjectMeta: v1.ObjectMeta{
 Name: nsName,
@@ -164,7 +165,7 @@ func unregisterClusters(clusters map[string]*cluster, f *framework.Framework) {
 nsName := f.FederationNamespace.Name
 for name, c := range clusters {
 if c.namespaceCreated {
-if _, err := c.Clientset.Core().Namespaces().Get(nsName); !errors.IsNotFound(err) {
+if _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); !errors.IsNotFound(err) {
 err := c.Clientset.Core().Namespaces().Delete(nsName, &v1.DeleteOptions{})
 framework.ExpectNoError(err, "Couldn't delete the namespace %s in cluster %q: %v", nsName, name, err)
 }
@@ -217,7 +218,7 @@ func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string,
 By(fmt.Sprintf("Fetching a federated service shard of service %q in namespace %q from cluster", service.Name, namespace))
 var clusterService *v1.Service
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-clusterService, err := clientset.Services(namespace).Get(service.Name)
+clusterService, err := clientset.Services(namespace).Get(service.Name, metav1.GetOptions{})
 if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
 By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is absent", service.Name, namespace))
 return true, nil // Success
@@ -297,7 +298,7 @@ func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Serv
 err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
 var err error
-cSvc, err = c.Clientset.Services(namespace).Get(service.Name)
+cSvc, err = c.Clientset.Services(namespace).Get(service.Name, metav1.GetOptions{})
 if err != nil && !errors.IsNotFound(err) {
 // Get failed with an error, try again.
 framework.Logf("Failed to find service %q in namespace %q, in cluster %q: %v. Trying again in %s", service.Name, namespace, name, err, framework.Poll)
@@ -385,7 +386,7 @@ func podExitCodeDetector(f *framework.Framework, name, namespace string, code in
 }
 return func() error {
-pod, err := f.ClientSet.Core().Pods(namespace).Get(name)
+pod, err := f.ClientSet.Core().Pods(namespace).Get(name, metav1.GetOptions{})
 if err != nil {
 return logerr(err)
 }

View File

@@ -24,6 +24,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
 remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
@@ -112,14 +113,14 @@ func (f *Framework) ExecShellInContainer(podName, containerName string, cmd stri
 }
 func (f *Framework) ExecCommandInPod(podName string, cmd ...string) string {
-pod, err := f.PodClient().Get(podName)
+pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred(), "failed to get pod")
 Expect(pod.Spec.Containers).NotTo(BeEmpty())
 return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
 }
 func (f *Framework) ExecCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
-pod, err := f.PodClient().Get(podName)
+pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred(), "failed to get pod")
 Expect(pod.Spec.Containers).NotTo(BeEmpty())
 return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
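Illustrative usage sketch, not part of the patch: these helpers fetch the pod first (now with metav1.GetOptions{}) and then exec into its first container. A hypothetical call from a test, with a made-up pod name and command, might look like:

    // Runs the command in the pod's first container and returns its stdout.
    output := f.ExecCommandInPod("busybox-test", "sh", "-c", "echo ready")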

View File

@@ -34,6 +34,7 @@ import (
 apierrs "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/restclient"
@@ -283,7 +284,7 @@ func (f *Framework) deleteFederationNs() {
 }
 // Verify that it got deleted.
 err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
-if _, err := clientset.Core().Namespaces().Get(ns.Name); err != nil {
+if _, err := clientset.Core().Namespaces().Get(ns.Name, metav1.GetOptions{}); err != nil {
 if apierrs.IsNotFound(err) {
 return true, nil
 }

View File

@@ -423,12 +423,12 @@ func (config *NetworkingTestConfig) createTestPods() {
 ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
 var err error
-config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name)
+config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name, metav1.GetOptions{})
 if err != nil {
 Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
 }
-config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name)
+config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name, metav1.GetOptions{})
 if err != nil {
 Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
 }
@@ -441,7 +441,7 @@ func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.S
 err = WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
 Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
-createdService, err := config.getServiceClient().Get(serviceSpec.Name)
+createdService, err := config.getServiceClient().Get(serviceSpec.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
 return createdService
@@ -542,7 +542,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
 runningPods := make([]*v1.Pod, 0, len(nodes))
 for _, p := range createdPods {
 ExpectNoError(config.f.WaitForPodReady(p.Name))
-rp, err := config.getPodClient().Get(p.Name)
+rp, err := config.getPodClient().Get(p.Name, metav1.GetOptions{})
 ExpectNoError(err)
 runningPods = append(runningPods, rp)
 }

View File

@@ -24,6 +24,7 @@ import (
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/sets"
@@ -66,7 +67,7 @@ func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
 p := c.Create(pod)
 ExpectNoError(c.f.WaitForPodRunning(p.Name))
 // Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
-p, err := c.Get(p.Name)
+p, err := c.Get(p.Name, metav1.GetOptions{})
 ExpectNoError(err)
 return p
 }
@@ -92,7 +93,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
 // pod object.
 func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
 ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
-pod, err := c.PodInterface.Get(name)
+pod, err := c.PodInterface.Get(name, metav1.GetOptions{})
 if err != nil {
 return false, fmt.Errorf("failed to get pod %q: %v", name, err)
 }
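Illustrative sketch, not part of the patch: PodClient.Update follows the usual read-modify-write retry pattern, now with the explicit GetOptions argument. Assuming a core-v1 pod interface named pods, a pod name, and a caller-supplied updateFn (all placeholder names), the loop looks roughly like this:

    // Fetch a fresh copy, apply the mutation, and retry on write conflicts.
    err := wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) {
        pod, err := pods.Get(name, metav1.GetOptions{})
        if err != nil {
            return false, fmt.Errorf("failed to get pod %q: %v", name, err)
        }
        updateFn(pod) // mutate the freshly fetched object
        if _, err = pods.Update(pod); err == nil {
            return true, nil // update accepted
        }
        if errors.IsConflict(err) {
            return false, nil // stale resourceVersion, fetch again and retry
        }
        return false, err
    })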

View File

@@ -367,7 +367,7 @@ func SkipUnlessFederated(c clientset.Interface) {
 federationNS = "federation"
 }
-_, err := c.Core().Namespaces().Get(federationNS)
+_, err := c.Core().Namespaces().Get(federationNS, metav1.GetOptions{})
 if err != nil {
 if apierrs.IsNotFound(err) {
 Skipf("Could not find federation namespace %s: skipping federated test", federationNS)
@@ -756,7 +756,7 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
 func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
 Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
-pod, err := c.Core().Pods(ns).Get(podName)
+pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
 if err != nil {
 if apierrs.IsNotFound(err) {
 Logf("Pod %q in namespace %q disappeared. Error: %v", podName, ns, err)
@@ -828,7 +828,7 @@ func WaitForFederationApiserverReady(c *federation_release_1_5.Clientset) error
 func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
 Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
-pv, err := c.Core().PersistentVolumes().Get(pvName)
+pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
 if err != nil {
 Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
 continue
@@ -848,7 +848,7 @@ func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.In
 func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
 Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
-pv, err := c.Core().PersistentVolumes().Get(pvName)
+pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
 if err == nil {
 Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
 continue
@@ -868,7 +868,7 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll,
 func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
 Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
-pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvcName)
+pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
 if err != nil {
 Logf("Get persistent volume claim %s in failed, ignoring for %v: %v", pvcName, Poll, err)
 continue
@@ -973,7 +973,7 @@ func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace st
 // wait for namespace to delete or timeout.
 err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
-if _, err := c.Core().Namespaces().Get(namespace); err != nil {
+if _, err := c.Core().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil {
 if apierrs.IsNotFound(err) {
 return true, nil
 }
@@ -1046,7 +1046,7 @@ func logNamespaces(c clientset.Interface, namespace string) {
 // logNamespace logs detail about a namespace
 func logNamespace(c clientset.Interface, namespace string) {
-ns, err := c.Core().Namespaces().Get(namespace)
+ns, err := c.Core().Namespaces().Get(namespace, metav1.GetOptions{})
 if err != nil {
 if apierrs.IsNotFound(err) {
 Logf("namespace: %v no longer exists", namespace)
@@ -1452,7 +1452,7 @@ func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string)
 // WaitForService waits until the service appears (exist == true), or disappears (exist == false)
 func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
 err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-_, err := c.Core().Services(namespace).Get(name)
+_, err := c.Core().Services(namespace).Get(name, metav1.GetOptions{})
 switch {
 case err == nil:
 if !exist {
@@ -1507,7 +1507,7 @@ func countEndpointsNum(e *v1.Endpoints) int {
 // WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
 func WaitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
 err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-_, err := c.Core().ReplicationControllers(namespace).Get(name)
+_, err := c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
 if err != nil {
 Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
 return !exist, nil
@@ -1525,7 +1525,7 @@ func WaitForReplicationController(c clientset.Interface, namespace, name string,
 func WaitForEndpoint(c clientset.Interface, ns, name string) error {
 for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
-endpoint, err := c.Core().Endpoints(ns).Get(name)
+endpoint, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
 if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
 Logf("Endpoint %s/%s is not ready yet", ns, name)
@@ -2178,7 +2178,7 @@ func (f *Framework) MatchContainerOutput(
 }
 // Grab its logs. Get host first.
-podStatus, err := podClient.Get(createdPod.Name)
+podStatus, err := podClient.Get(createdPod.Name, metav1.GetOptions{})
 if err != nil {
 return fmt.Errorf("failed to get pod status: %v", err)
 }
@@ -2309,7 +2309,7 @@ func dumpAllNodeInfo(c clientset.Interface) {
 func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
 for _, n := range nodeNames {
 logFunc("\nLogging node info for node %v", n)
-node, err := c.Core().Nodes().Get(n)
+node, err := c.Core().Nodes().Get(n, metav1.GetOptions{})
 if err != nil {
 logFunc("Error getting node info %v", err)
 }
@@ -2482,7 +2482,7 @@ func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, la
 func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
 By("verifying the node has the label " + labelKey + " " + labelValue)
-node, err := c.Core().Nodes().Get(nodeName)
+node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 ExpectNoError(err)
 Expect(node.Labels[labelKey]).To(Equal(labelValue))
 }
@@ -2499,7 +2499,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string)
 func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
 for attempt := 0; attempt < UpdateRetries; attempt++ {
-node, err := c.Core().Nodes().Get(nodeName)
+node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 ExpectNoError(err)
 nodeTaints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
@@ -2553,7 +2553,7 @@ func taintExists(taints []v1.Taint, taintToFind v1.Taint) bool {
 func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint v1.Taint) {
 By("verifying the node has the taint " + taint.ToString())
-node, err := c.Core().Nodes().Get(nodeName)
+node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 ExpectNoError(err)
 nodeTaints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
@@ -2586,7 +2586,7 @@ func deleteTaint(oldTaints []v1.Taint, taintToDelete v1.Taint) ([]v1.Taint, erro
 func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
 By("removing the taint " + taint.ToString() + " off the node " + nodeName)
 for attempt := 0; attempt < UpdateRetries; attempt++ {
-node, err := c.Core().Nodes().Get(nodeName)
+node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 ExpectNoError(err)
 nodeTaints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
@@ -2622,7 +2622,7 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint)
 time.Sleep(100 * time.Millisecond)
 }
-nodeUpdated, err := c.Core().Nodes().Get(nodeName)
+nodeUpdated, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 ExpectNoError(err)
 By("verifying the node doesn't have the taint " + taint.ToString())
 taintsGot, err := v1.GetTaintsFromNodeAnnotations(nodeUpdated.Annotations)
@@ -2774,11 +2774,11 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
 func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
 switch kind {
 case api.Kind("ReplicationController"):
-return c.Core().ReplicationControllers(ns).Get(name)
+return c.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
 case extensionsinternal.Kind("ReplicaSet"):
-return c.Extensions().ReplicaSets(ns).Get(name)
+return c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
 case extensionsinternal.Kind("Deployment"):
-return c.Extensions().Deployments(ns).Get(name)
+return c.Extensions().Deployments(ns).Get(name, metav1.GetOptions{})
 default:
 return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
 }
@@ -3020,7 +3020,7 @@ func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) er
 // Delete a ReplicaSet and all pods it spawned
 func DeleteReplicaSet(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
 By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
-rc, err := clientset.Extensions().ReplicaSets(ns).Get(name)
+rc, err := clientset.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
 if err != nil {
 if apierrs.IsNotFound(err) {
 Logf("ReplicaSet %s was already deleted: %v", name, err)
@@ -3068,7 +3068,7 @@ func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet)
 // WaitForReadyReplicaSet waits until the replica set has all of its replicas ready.
 func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
-rs, err := c.Extensions().ReplicaSets(ns).Get(name)
+rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
 if err != nil {
 return err
 }
@@ -3134,7 +3134,7 @@ func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deploymen
 err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
 var err error
-deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
+deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -3206,7 +3206,7 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) er
 err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
 var err error
-deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
+deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -3260,7 +3260,7 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) er
 // WaitForDeploymentUpdatedReplicasLTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
 func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int, desiredGeneration int64) error {
 err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -3279,7 +3279,7 @@ func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentNa
 // Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
 func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
 err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -3303,7 +3303,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
 var reason string
 err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
 var err error
-deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -3360,7 +3360,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
 func WaitForOverlappingAnnotationMatch(c clientset.Interface, ns, deploymentName, expected string) error {
 return wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -3373,7 +3373,7 @@ func WaitForOverlappingAnnotationMatch(c clientset.Interface, ns, deploymentName
 // CheckNewRSAnnotations check if the new RS's annotation is as expected
 func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 if err != nil {
 return err
 }
@@ -3410,7 +3410,7 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in
 // Waits for the deployment to clean up old rcs.
 func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
 return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -3435,13 +3435,15 @@ func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*
 }
 func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
-return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute)
+return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
+return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+}, desiredGeneration, Poll, 1*time.Minute)
 }
 func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
 var conditions []extensions.DeploymentCondition
 pollErr := wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) {
-deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
@@ -3516,7 +3518,7 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string,
 deployments := c.Extensions().Deployments(namespace)
 var updateErr error
 pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
-if deployment, err = deployments.Get(name); err != nil {
+if deployment, err = deployments.Get(name, metav1.GetOptions{}); err != nil {
 return false, err
 }
 // Apply the update, then attempt to push it to the apiserver.
@@ -3541,7 +3543,7 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
 var updateErr error
 pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
 var err error
-if rs, err = c.Extensions().ReplicaSets(namespace).Get(name); err != nil {
+if rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
 return false, err
 }
 // Apply the update, then attempt to push it to the apiserver.
@@ -3566,7 +3568,7 @@ func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, na
 var updateErr error
 pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
 var err error
-if rc, err = c.Core().ReplicationControllers(namespace).Get(name); err != nil {
+if rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}); err != nil {
 return false, err
 }
 // Apply the update, then attempt to push it to the apiserver.
@@ -3590,7 +3592,7 @@ func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string,
 statefulSets := c.Apps().StatefulSets(namespace)
 var updateErr error
 pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
-if statefulSet, err = statefulSets.Get(name); err != nil {
+if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {
 return false, err
 }
 // Apply the update, then attempt to push it to the apiserver.
@@ -3614,7 +3616,7 @@ func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
 jobs := c.Batch().Jobs(namespace)
 var updateErr error
 pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
-if job, err = jobs.Get(name); err != nil {
+if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
 return false, err
 }
 // Apply the update, then attempt to push it to the apiserver.
@@ -3932,7 +3934,7 @@ func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) boo
 func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
 Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
-node, err := c.Core().Nodes().Get(name)
+node, err := c.Core().Nodes().Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
Logf("Couldn't get node %s", name) Logf("Couldn't get node %s", name)
continue continue
@ -4259,7 +4261,7 @@ func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeou
// address. Returns an error if the node the pod is on doesn't have an External // address. Returns an error if the node the pod is on doesn't have an External
// address. // address.
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) { func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
node, err := client.Core().Nodes().Get(p.Spec.NodeName) node, err := client.Core().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
if err != nil { if err != nil {
return "", err return "", err
} }
@ -4335,7 +4337,7 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st
// getIngressAddress returns the ips/hostnames associated with the Ingress. // getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) { func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) {
ing, err := client.Extensions().Ingresses(ns).Get(name) ing, err := client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -4406,7 +4408,7 @@ func LookForString(expectedString string, timeout time.Duration, fn func() strin
// getSvcNodePort returns the node port for the given service:port. // getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) { func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
svc, err := client.Core().Services(ns).Get(name) svc, err := client.Core().Services(ns).Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -4471,7 +4473,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil { if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil {
return err return err
} }
rc, err := clientset.Core().ReplicationControllers(ns).Get(name) rc, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
@ -4739,7 +4741,7 @@ func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
_, err := podClient.Create(pod) _, err := podClient.Create(pod)
ExpectNoError(err) ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName)) ExpectNoError(f.WaitForPodRunning(podName))
createdPod, err := podClient.Get(podName) createdPod, err := podClient.Get(podName, metav1.GetOptions{})
ExpectNoError(err) ExpectNoError(err)
ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port) ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port)
Logf("Target pod IP:port is %s", ip) Logf("Target pod IP:port is %s", ip)
@ -4799,7 +4801,7 @@ func CoreDump(dir string) {
func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) { func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) {
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
pod, err := client.Core().Pods(ns).Get(name) pod, err := client.Core().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to get pod %q: %v", name, err) return nil, fmt.Errorf("Failed to get pod %q: %v", name, err)
} }
@ -5061,7 +5063,7 @@ func getMaster(c clientset.Interface) Address {
master := Address{} master := Address{}
// Populate the internal IP. // Populate the internal IP.
eps, err := c.Core().Endpoints(v1.NamespaceDefault).Get("kubernetes") eps, err := c.Core().Endpoints(v1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil { if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err) Failf("Failed to get kubernetes endpoints: %v", err)
} }

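The same mechanical change is applied at every call site in this commit: each typed client Get gains an explicit metav1.GetOptions argument. As a minimal illustrative sketch only (the helper name below is hypothetical and not part of this commit), a lookup that previously read c.Core().Pods(ns).Get(name) becomes:

// Illustrative sketch: hypothetical helper showing the updated Get signature.
func getPodExample(c clientset.Interface, ns, name string) (*v1.Pod, error) {
    // Passing the zero-value metav1.GetOptions{} keeps the behaviour of the old no-options Get.
    return c.Core().Pods(ns).Get(name, metav1.GetOptions{})
}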
@@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
    }
    // wait for rc to create pods
    if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
-        rc, err := rcClient.Get(rc.Name)
+        rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
        if err != nil {
            return false, fmt.Errorf("Failed to get rc: %v", err)
        }
@@ -242,7 +242,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
    }
    // wait for rc to create some pods
    if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
-        rc, err := rcClient.Get(rc.Name)
+        rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
        if err != nil {
            return false, fmt.Errorf("Failed to get rc: %v", err)
        }
@@ -44,6 +44,7 @@ import (
    "google.golang.org/api/googleapi"
    apierrs "k8s.io/kubernetes/pkg/api/errors"
    extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
    "k8s.io/kubernetes/pkg/labels"
@@ -307,7 +308,7 @@ func createSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host
        },
    }
    var s *v1.Secret
-    if s, err = kubeClient.Core().Secrets(ing.Namespace).Get(tls.SecretName); err == nil {
+    if s, err = kubeClient.Core().Secrets(ing.Namespace).Get(tls.SecretName, metav1.GetOptions{}); err == nil {
        // TODO: Retry the update. We don't really expect anything to conflict though.
        framework.Logf("Updating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
        s.Data = secret.Data
@@ -767,7 +768,7 @@ func (j *testJig) update(update func(ing *extensions.Ingress)) {
    var err error
    ns, name := j.ing.Namespace, j.ing.Name
    for i := 0; i < 3; i++ {
-        j.ing, err = j.client.Extensions().Ingresses(ns).Get(name)
+        j.ing, err = j.client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
        if err != nil {
            framework.Failf("failed to get ingress %q: %v", name, err)
        }
@@ -878,7 +879,7 @@ func ingFromManifest(fileName string) *extensions.Ingress {
func (cont *GCEIngressController) getL7AddonUID() (string, error) {
    framework.Logf("Retrieving UID from config map: %v/%v", api.NamespaceSystem, uidConfigMap)
-    cm, err := cont.c.Core().ConfigMaps(api.NamespaceSystem).Get(uidConfigMap)
+    cm, err := cont.c.Core().ConfigMaps(api.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
@@ -932,7 +933,7 @@ func (cont *NginxIngressController) init() {
    framework.Logf("initializing nginx ingress controller")
    framework.RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", cont.ns))
-    rc, err := cont.c.Core().ReplicationControllers(cont.ns).Get("nginx-ingress-controller")
+    rc, err := cont.c.Core().ReplicationControllers(cont.ns).Get("nginx-ingress-controller", metav1.GetOptions{})
    framework.ExpectNoError(err)
    cont.rc = rc
@@ -24,6 +24,7 @@ import (
    "k8s.io/kubernetes/pkg/api/v1"
    batchinternal "k8s.io/kubernetes/pkg/apis/batch"
    batch "k8s.io/kubernetes/pkg/apis/batch/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/kubectl"
    "k8s.io/kubernetes/pkg/labels"
@@ -276,7 +277,7 @@ func newTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, compl
}
func getJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
-    return c.Batch().Jobs(ns).Get(name)
+    return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
}
func createJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
@@ -313,7 +314,7 @@ func waitForAllPodsRunning(c clientset.Interface, ns, jobName string, parallelis
// Wait for job to reach completions.
func waitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
    return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
-        curr, err := c.Batch().Jobs(ns).Get(jobName)
+        curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
@@ -324,7 +325,7 @@ func waitForJobFinish(c clientset.Interface, ns, jobName string, completions int
// Wait for job fail.
func waitForJobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
    return wait.Poll(framework.Poll, timeout, func() (bool, error) {
-        curr, err := c.Batch().Jobs(ns).Get(jobName)
+        curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
@@ -21,6 +21,7 @@ import (
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/test/e2e/framework"
@@ -60,7 +61,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
    // being run as the first e2e test just after the e2e cluster has been created.
    var err error
    for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
-        if _, err = s.Get("kibana-logging"); err == nil {
+        if _, err = s.Get("kibana-logging", metav1.GetOptions{}); err == nil {
            break
        }
        framework.Logf("Attempt to check for the existence of the Kibana service failed after %v", time.Since(start))
@@ -205,7 +205,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
    framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1",
        "--schedule="+schedule, "--image="+busyboxImage, nsFlag)
    By("verifying the ScheduledJob " + sjName + " was created")
-    sj, err := c.BatchV2alpha1().CronJobs(ns).Get(sjName)
+    sj, err := c.BatchV2alpha1().CronJobs(ns).Get(sjName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err)
    }
@@ -242,7 +242,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
    framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1",
        "--schedule="+schedule, "--image="+busyboxImage, nsFlag)
    By("verifying the CronJob " + cjName + " was created")
-    sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cjName)
+    sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting CronJob %s: %v", cjName, err)
    }
@@ -756,7 +756,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    })
    validateService := func(name string, servicePort int, timeout time.Duration) {
        err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
-            endpoints, err := c.Core().Endpoints(ns).Get(name)
+            endpoints, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                // log the real error
                framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
@@ -787,7 +787,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
        })
        Expect(err).NotTo(HaveOccurred())
-        service, err := c.Core().Services(ns).Get(name)
+        service, err := c.Core().Services(ns).Get(name, metav1.GetOptions{})
        Expect(err).NotTo(HaveOccurred())
        if len(service.Spec.Ports) != 1 {
@@ -1016,7 +1016,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    By("running the image " + nginxImage)
    framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
    By("verifying the rc " + rcName + " was created")
-    rc, err := c.Core().ReplicationControllers(ns).Get(rcName)
+    rc, err := c.Core().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting rc %s: %v", rcName, err)
    }
@@ -1072,7 +1072,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    By("running the image " + nginxImage)
    framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
    By("verifying the rc " + rcName + " was created")
-    rc, err := c.Core().ReplicationControllers(ns).Get(rcName)
+    rc, err := c.Core().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting rc %s: %v", rcName, err)
    }
@@ -1118,7 +1118,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    By("running the image " + nginxImage)
    framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag)
    By("verifying the deployment " + dName + " was created")
-    d, err := c.Extensions().Deployments(ns).Get(dName)
+    d, err := c.Extensions().Deployments(ns).Get(dName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting deployment %s: %v", dName, err)
    }
@@ -1160,7 +1160,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    By("running the image " + nginxImage)
    framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag)
    By("verifying the job " + jobName + " was created")
-    job, err := c.Batch().Jobs(ns).Get(jobName)
+    job, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting job %s: %v", jobName, err)
    }
@@ -1193,7 +1193,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    By("running the image " + nginxImage)
    framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag)
    By("verifying the pod " + podName + " was created")
-    pod, err := c.Core().Pods(ns).Get(podName)
+    pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting pod %s: %v", podName, err)
    }
@@ -1244,7 +1244,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag)
    By("verifying the pod " + podName + " has the right image " + busyboxImage)
-    pod, err := c.Core().Pods(ns).Get(podName)
+    pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting deployment %s: %v", podName, err)
    }
@@ -1276,7 +1276,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    Expect(runOutput).To(ContainSubstring("stdin closed"))
    By("verifying the job " + jobName + " was deleted")
-    _, err := c.Batch().Jobs(ns).Get(jobName)
+    _, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
    Expect(err).To(HaveOccurred())
    Expect(apierrs.IsNotFound(err)).To(BeTrue())
})
@@ -1425,7 +1425,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag)
    By("verifying that the quota was created")
-    quota, err := c.Core().ResourceQuotas(ns).Get(quotaName)
+    quota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting quota %s: %v", quotaName, err)
    }
@@ -1455,7 +1455,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
    framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag)
    By("verifying that the quota was created")
-    quota, err := c.Core().ResourceQuotas(ns).Get(quotaName)
+    quota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
    if err != nil {
        framework.Failf("Failed getting quota %s: %v", quotaName, err)
    }
@@ -22,6 +22,7 @@ import (
    "time"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/util/sets"
    "k8s.io/kubernetes/pkg/util/uuid"
@@ -101,7 +102,7 @@ func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRem
    var node *v1.Node
    var err error
    for i := 0; i < maxRetries; i++ {
-        node, err = c.Core().Nodes().Get(nodeName)
+        node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
        if err != nil {
            framework.Logf("Error getting node %s: %v", nodeName, err)
            continue
@@ -21,6 +21,7 @@ import (
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    "k8s.io/kubernetes/test/e2e/framework"
    . "github.com/onsi/ginkgo"
@@ -46,7 +47,7 @@ var _ = framework.KubeDescribe("LimitRange", func() {
    Expect(err).NotTo(HaveOccurred())
    By("Fetching the LimitRange to ensure it has proper values")
-    limitRange, err = f.ClientSet.Core().LimitRanges(f.Namespace.Name).Get(limitRange.Name)
+    limitRange, err = f.ClientSet.Core().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
    expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
    actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
    err = equalResourceRequirement(expected, actual)
@@ -58,7 +59,7 @@ var _ = framework.KubeDescribe("LimitRange", func() {
    Expect(err).NotTo(HaveOccurred())
    By("Ensuring Pod has resource requirements applied from LimitRange")
-    pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name)
+    pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    for i := range pod.Spec.Containers {
        err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
@@ -75,7 +76,7 @@ var _ = framework.KubeDescribe("LimitRange", func() {
    Expect(err).NotTo(HaveOccurred())
    By("Ensuring Pod has merged resource requirements applied from LimitRange")
-    pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name)
+    pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    // This is an interesting case, so it's worth a comment
    // If you specify a Limit, and no Request, the Limit will default to the Request
@@ -101,7 +101,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
    framework.ExpectNoError(err)
    framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, podName, ns))
-    pod, err := c.Core().Pods(ns).Get(podName)
+    pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
    framework.ExpectNoError(err)
    nodeClient := f.ClientSet.Core().Nodes()
@@ -24,6 +24,7 @@ import (
    "k8s.io/kubernetes/pkg/api/errors"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/util/intstr"
    "k8s.io/kubernetes/pkg/util/wait"
    "k8s.io/kubernetes/test/e2e/framework"
@@ -116,7 +117,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
    maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
    framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
        func() (bool, error) {
-            _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name)
+            _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{})
            if err != nil && errors.IsNotFound(err) {
                return true, nil
            }
@@ -124,7 +125,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
        }))
    By("Verifying there is no pod in the namespace")
-    _, err = f.ClientSet.Core().Pods(namespace.Name).Get(pod.Name)
+    _, err = f.ClientSet.Core().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
    Expect(err).To(HaveOccurred())
}
@@ -168,7 +169,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
    maxWaitSeconds := int64(60)
    framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
        func() (bool, error) {
-            _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name)
+            _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{})
            if err != nil && errors.IsNotFound(err) {
                return true, nil
            }
@@ -176,7 +177,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
        }))
    By("Verifying there is no service in the namespace")
-    _, err = f.ClientSet.Core().Services(namespace.Name).Get(service.Name)
+    _, err = f.ClientSet.Core().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
    Expect(err).To(HaveOccurred())
}
@@ -23,6 +23,7 @@ import (
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/client/cache"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/fields"
@@ -268,7 +269,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
    Expect(err).NotTo(HaveOccurred())
    nodeName := pods.Items[0].Spec.NodeName
-    node, err := c.Core().Nodes().Get(nodeName)
+    node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    // This creates a temporary network partition, verifies that 'podNameToDisappear',
@@ -306,7 +307,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
    // verify that it is really on the requested node
    {
-        pod, err := c.Core().Pods(ns).Get(additionalPod)
+        pod, err := c.Core().Pods(ns).Get(additionalPod, metav1.GetOptions{})
        Expect(err).NotTo(HaveOccurred())
        if pod.Spec.NodeName != node.Name {
            framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
@@ -333,7 +334,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
    Expect(err).NotTo(HaveOccurred())
    nodeName := pods.Items[0].Spec.NodeName
-    node, err := c.Core().Nodes().Get(nodeName)
+    node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    // This creates a temporary network partition, verifies that 'podNameToDisappear',
@@ -411,7 +412,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
    pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
    pod := pst.getPodList(ps).Items[0]
-    node, err := c.Core().Nodes().Get(pod.Spec.NodeName)
+    node, err := c.Core().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
    framework.ExpectNoError(err)
    // Blocks outgoing network traffic on 'node'. Then verifies that 'podNameToDisappear',
@@ -453,7 +454,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
    Expect(err).NotTo(HaveOccurred())
    nodeName := pods.Items[0].Spec.NodeName
-    node, err := c.Core().Nodes().Get(nodeName)
+    node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    // This creates a temporary network partition, verifies that the job has 'parallelism' number of
@@ -24,6 +24,7 @@ import (
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
    "k8s.io/kubernetes/pkg/fields"
@@ -411,7 +412,7 @@ func verifyNoEvents(e coreclientset.EventInterface, options v1.ListOptions) erro
// verifyCondition verifies specific node condition is generated, if reason and message are empty, they will not be checked
func verifyCondition(n coreclientset.NodeInterface, nodeName string, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error {
-    node, err := n.Get(nodeName)
+    node, err := n.Get(nodeName, metav1.GetOptions{})
    if err != nil {
        return err
    }
@@ -24,6 +24,7 @@ import (
    cadvisorapi "github.com/google/cadvisor/info/v1"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/fields"
    "k8s.io/kubernetes/pkg/util/wait"
@@ -98,7 +99,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
    })
    It("runs out of disk space", func() {
-        unfilledNode, err := c.Core().Nodes().Get(unfilledNodeName)
+        unfilledNode, err := c.Core().Nodes().Get(unfilledNodeName, metav1.GetOptions{})
        framework.ExpectNoError(err)
        By(fmt.Sprintf("Calculating CPU availability on node %s", unfilledNode.Name))
@@ -122,7 +123,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
        createOutOfDiskPod(c, ns, name, podCPU)
        framework.ExpectNoError(f.WaitForPodRunning(name))
-        pod, err := podClient.Get(name)
+        pod, err := podClient.Get(name, metav1.GetOptions{})
        framework.ExpectNoError(err)
        Expect(pod.Spec.NodeName).To(Equal(unfilledNodeName))
    }
@@ -161,7 +162,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
    By(fmt.Sprintf("Verifying that pod %s schedules on node %s", pendingPodName, recoveredNodeName))
    framework.ExpectNoError(f.WaitForPodRunning(pendingPodName))
-    pendingPod, err := podClient.Get(pendingPodName)
+    pendingPod, err := podClient.Get(pendingPodName, metav1.GetOptions{})
    framework.ExpectNoError(err)
    Expect(pendingPod.Spec.NodeName).To(Equal(recoveredNodeName))
})
@@ -724,7 +724,7 @@ func waitForPDInVolumesInUse(
        "Waiting for node %s's VolumesInUse Status %s PD %q",
        nodeName, logStr, diskName)
    for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) {
-        nodeObj, err := nodeClient.Get(string(nodeName))
+        nodeObj, err := nodeClient.Get(string(nodeName), metav1.GetOptions{})
        if err != nil || nodeObj == nil {
            framework.Logf(
                "Failed to fetch node object %q from API server. err=%v",
@@ -69,7 +69,7 @@ func pvPvcCleanup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap)
    if c != nil && len(ns) > 0 {
        for pvcKey := range claims {
-            _, err := c.Core().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name)
+            _, err := c.Core().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name, metav1.GetOptions{})
            if !apierrs.IsNotFound(err) {
                Expect(err).NotTo(HaveOccurred())
                framework.Logf(" deleting PVC %v ...", pvcKey)
@@ -81,7 +81,7 @@ func pvPvcCleanup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap)
        }
        for name := range pvols {
-            _, err := c.Core().PersistentVolumes().Get(name)
+            _, err := c.Core().PersistentVolumes().Get(name, metav1.GetOptions{})
            if !apierrs.IsNotFound(err) {
                Expect(err).NotTo(HaveOccurred())
                framework.Logf(" deleting PV %v ...", name)
@@ -106,7 +106,7 @@ func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
    Expect(err).NotTo(HaveOccurred())
    // Check that the PVC is really deleted.
-    pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
+    pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
    Expect(apierrs.IsNotFound(err)).To(BeTrue())
    // Wait for the PV's phase to return to the expected Phase
@@ -115,7 +115,7 @@ func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
    Expect(err).NotTo(HaveOccurred())
    // examine the pv's ClaimRef and UID and compare to expected values
-    pv, err = c.Core().PersistentVolumes().Get(pv.Name)
+    pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    cr := pv.Spec.ClaimRef
    if expctPVPhase == v1.VolumeAvailable {
@@ -140,7 +140,7 @@ func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap,
    var expctPVPhase v1.PersistentVolumePhase
    for pvName := range pvols {
-        pv, err := c.Core().PersistentVolumes().Get(pvName)
+        pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
        Expect(apierrs.IsNotFound(err)).To(BeFalse())
        cr := pv.Spec.ClaimRef
        // if pv is bound then delete the pvc it is bound to
@@ -151,7 +151,7 @@ func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap,
            pvcKey := makePvcKey(ns, cr.Name)
            _, found := claims[pvcKey]
            Expect(found).To(BeTrue())
-            pvc, err := c.Core().PersistentVolumeClaims(ns).Get(cr.Name)
+            pvc, err := c.Core().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{})
            Expect(apierrs.IsNotFound(err)).To(BeFalse())
            // what Phase do we expect the PV that was bound to the claim to
@@ -305,11 +305,11 @@ func waitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
    Expect(err).NotTo(HaveOccurred())
    // Re-get the pv and pvc objects
-    pv, err = c.Core().PersistentVolumes().Get(pv.Name)
+    pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    // Re-get the pvc and
-    pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
+    pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    // The pv and pvc are both bound, but to each other?
@@ -343,7 +343,7 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
        }
        Expect(err).NotTo(HaveOccurred())
-        pv, err := c.Core().PersistentVolumes().Get(pvName)
+        pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
        Expect(err).NotTo(HaveOccurred())
        if cr := pv.Spec.ClaimRef; cr != nil && len(cr.Name) > 0 {
            // Assert bound pvc is a test resource. Failing assertion could
@@ -435,7 +435,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
    // 1. verify each PV permits write access to a client pod
    By("Checking pod has write access to PersistentVolumes")
    for pvcKey := range claims {
-        pvc, err := c.Core().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name)
+        pvc, err := c.Core().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name, metav1.GetOptions{})
        Expect(err).NotTo(HaveOccurred())
        if len(pvc.Spec.VolumeName) == 0 {
            continue // claim is not bound
@@ -503,7 +503,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
    AfterEach(func() {
        if c != nil && len(ns) > 0 {
            if pvc != nil && len(pvc.Name) > 0 {
-                _, err := c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
+                _, err := c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
                if !apierrs.IsNotFound(err) {
                    Expect(err).NotTo(HaveOccurred())
                    framework.Logf("AfterEach: deleting PVC %v", pvc.Name)
@@ -515,7 +515,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
            pvc = nil
            if pv != nil && len(pv.Name) > 0 {
-                _, err := c.Core().PersistentVolumes().Get(pv.Name)
+                _, err := c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
                if !apierrs.IsNotFound(err) {
                    Expect(err).NotTo(HaveOccurred())
                    framework.Logf("AfterEach: deleting PV %v", pv.Name)
@@ -240,7 +240,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
    By("Verifying that the 2nd pod wont be removed if it is not running and ready")
    pst.confirmPetCount(2, ps, 10*time.Second)
    expectedPodName := ps.Name + "-1"
-    expectedPod, err := f.ClientSet.Core().Pods(ns).Get(expectedPodName)
+    expectedPod, err := f.ClientSet.Core().Pods(ns).Get(expectedPodName, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.SingleObject(
        v1.ObjectMeta{
@@ -488,7 +488,7 @@ var _ = framework.KubeDescribe("Stateful Set recreate", func() {
    By("waiting when pet pod " + petPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
    // we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until
    Eventually(func() error {
-        petPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(petPodName)
+        petPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(petPodName, metav1.GetOptions{})
        if err != nil {
            return err
        }
@@ -812,7 +812,7 @@ type verifyPodFunc func(*v1.Pod)
func (p *statefulSetTester) verifyPodAtIndex(index int, ps *apps.StatefulSet, verify verifyPodFunc) {
    name := getPodNameAtIndex(index, ps)
-    pod, err := p.c.Core().Pods(ps.Namespace).Get(name)
+    pod, err := p.c.Core().Pods(ps.Namespace).Get(name, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ps.Namespace, ps.Name))
    verify(pod)
}
@@ -861,7 +861,7 @@ func (p *statefulSetTester) restart(ps *apps.StatefulSet) {
func (p *statefulSetTester) update(ns, name string, update func(ps *apps.StatefulSet)) {
    for i := 0; i < 3; i++ {
-        ps, err := p.c.Apps().StatefulSets(ns).Get(name)
+        ps, err := p.c.Apps().StatefulSets(ns).Get(name, metav1.GetOptions{})
        if err != nil {
            framework.Failf("failed to get statefulset %q: %v", name, err)
        }
@@ -979,7 +979,7 @@ func (p *statefulSetTester) waitForStatus(ps *apps.StatefulSet, expectedReplicas
    ns, name := ps.Namespace, ps.Name
    pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
        func() (bool, error) {
-            psGet, err := p.c.Apps().StatefulSets(ns).Get(name)
+            psGet, err := p.c.Apps().StatefulSets(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
@@ -25,6 +25,7 @@ import (
    "time"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/util/uuid"
    "k8s.io/kubernetes/pkg/util/wait"
@@ -100,7 +101,7 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
    // may be carried out immediately rather than gracefully.
    framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
    // save the running pod
-    pod, err = podClient.Get(pod.Name)
+    pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
    // start local proxy, so we can send graceful deletion over query string, rather than body parameter
@@ -22,6 +22,7 @@ import (
    "time"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/util/wait"
    "k8s.io/kubernetes/test/e2e/framework"
@@ -66,7 +67,7 @@ func testPreStop(c clientset.Interface, ns string) {
    val := "{\"Source\": \"prestop\"}"
-    podOut, err := c.Core().Pods(ns).Get(podDescr.Name)
+    podOut, err := c.Core().Pods(ns).Get(podDescr.Name, metav1.GetOptions{})
    framework.ExpectNoError(err, "getting pod info")
    preStopDescr := &v1.Pod{
@@ -22,6 +22,7 @@ import (
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/controller/replication"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/util/uuid"
@@ -165,7 +166,7 @@ func rcConditionCheck(f *framework.Framework) {
    Expect(err).NotTo(HaveOccurred())
    err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
-        quota, err = c.Core().ResourceQuotas(namespace).Get(name)
+        quota, err = c.Core().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
@@ -187,7 +188,7 @@ func rcConditionCheck(f *framework.Framework) {
    generation := rc.Generation
    conditions := rc.Status.Conditions
    err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
-        rc, err = c.Core().ReplicationControllers(namespace).Get(name)
+        rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
@@ -216,7 +217,7 @@ func rcConditionCheck(f *framework.Framework) {
    generation = rc.Generation
    conditions = rc.Status.Conditions
    err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
-        rc, err = c.Core().ReplicationControllers(namespace).Get(name)
+        rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
@@ -24,6 +24,7 @@ import (
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
+    metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/fields"
    "k8s.io/kubernetes/pkg/labels"
@@ -223,7 +224,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
    // Get the node initially.
    framework.Logf("Getting %s", name)
-    node, err := c.Core().Nodes().Get(name)
+    node, err := c.Core().Nodes().Get(name, metav1.GetOptions{})
    if err != nil {
        framework.Logf("Couldn't get node %s", name)
        return false
@@ -179,7 +179,7 @@ func rsConditionCheck(f *framework.Framework) {
    Expect(err).NotTo(HaveOccurred())
    err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
-        quota, err = c.Core().ResourceQuotas(namespace).Get(name)
+        quota, err = c.Core().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
@@ -201,7 +201,7 @@ func rsConditionCheck(f *framework.Framework) {
    generation := rs.Generation
    conditions := rs.Status.Conditions
    err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
-        rs, err = c.Extensions().ReplicaSets(namespace).Get(name)
+        rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
@@ -231,7 +231,7 @@ func rsConditionCheck(f *framework.Framework) {
    generation = rs.Generation
    conditions = rs.Status.Conditions
    err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
-        rs, err = c.Extensions().ReplicaSets(namespace).Get(name)
+        rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }

@@ -214,7 +214,7 @@ func newRCByName(c clientset.Interface, ns, name string, replicas int32, gracePe
}
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
-rc, err := c.Core().ReplicationControllers(ns).Get(name)
+rc, err := c.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
if err != nil {
return err
}


@@ -22,6 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait"
@@ -645,7 +646,7 @@ func deleteResourceQuota(c clientset.Interface, namespace, name string) error {
// wait for resource quota status to show the expected used resources value
func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) {
-resourceQuota, err := c.Core().ResourceQuotas(ns).Get(quotaName)
+resourceQuota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil {
return false, err
}


@@ -22,6 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/fields"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/labels"
@@ -157,7 +158,7 @@ func restartNodes(f *framework.Framework, nodeNames []string) error {
// List old boot IDs.
oldBootIDs := make(map[string]string)
for _, name := range nodeNames {
-node, err := f.ClientSet.Core().Nodes().Get(name)
+node, err := f.ClientSet.Core().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting node info before reboot: %s", err)
}
@@ -179,7 +180,7 @@ func restartNodes(f *framework.Framework, nodeNames []string) error {
// Wait for their boot IDs to change.
for _, name := range nodeNames {
if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
-node, err := f.ClientSet.Core().Nodes().Get(name)
+node, err := f.ClientSet.Core().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error getting node info after reboot: %s", err)
}


@@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -59,7 +60,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
ignoreLabels := framework.ImagePullerLabels
AfterEach(func() {
-rc, err := cs.Core().ReplicationControllers(ns).Get(RCName)
+rc, err := cs.Core().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
if err == nil && *(rc.Spec.Replicas) != 0 {
By("Cleaning up the replication controller")
err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, RCName)
@@ -284,7 +285,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion))
-labelPod, err := cs.Core().Pods(ns).Get(labelPodName)
+labelPod, err := cs.Core().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -368,7 +369,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion))
-labelPod, err := cs.Core().Pods(ns).Get(labelPodName)
+labelPod, err := cs.Core().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -393,7 +394,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name, ""))
-labelPod, err := cs.Core().Pods(ns).Get(pod.Name)
+labelPod, err := cs.Core().Pods(ns).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -499,7 +500,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion))
-labelPod, err := cs.Core().Pods(ns).Get(labelPodName)
+labelPod, err := cs.Core().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -610,7 +611,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion))
-labelPod, err := cs.Core().Pods(ns).Get(labelPodName)
+labelPod, err := cs.Core().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -635,7 +636,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name, pod.ResourceVersion))
-labelPod, err := cs.Core().Pods(ns).Get(pod.Name)
+labelPod, err := cs.Core().Pods(ns).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -659,7 +660,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name, pod.ResourceVersion))
-labelPod, err := cs.Core().Pods(ns).Get(pod.Name)
+labelPod, err := cs.Core().Pods(ns).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -711,7 +712,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// already when the kubelet does not know about its new taint yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName, pod.ResourceVersion))
-deployedPod, err := cs.Core().Pods(ns).Get(tolerationPodName)
+deployedPod, err := cs.Core().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(deployedPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -799,7 +800,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := createPausePod(f, conf)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
-pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name)
+pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return pod
}


@@ -26,6 +26,7 @@ import (
"fmt"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -180,7 +181,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
Expect(err).To(BeNil())
Expect(content).To(ContainSubstring(testContent))
-foundPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name)
+foundPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Confirm that the file can be accessed from a second


@@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/v1/service"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/endpoint"
@@ -107,7 +108,7 @@ var _ = framework.KubeDescribe("Services", func() {
// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
It("should provide secure master service [Conformance]", func() {
-_, err := cs.Core().Services(v1.NamespaceDefault).Get("kubernetes")
+_, err := cs.Core().Services(v1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
})
@@ -1123,9 +1124,9 @@ var _ = framework.KubeDescribe("Services", func() {
acceptPodName := createExecPodOrFail(cs, namespace, "execpod-accept")
dropPodName := createExecPodOrFail(cs, namespace, "execpod-drop")
-accpetPod, err := cs.Core().Pods(namespace).Get(acceptPodName)
+accpetPod, err := cs.Core().Pods(namespace).Get(acceptPodName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
-dropPod, err := cs.Core().Pods(namespace).Get(dropPodName)
+dropPod, err := cs.Core().Pods(namespace).Get(dropPodName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("creating a pod to be part of the service " + serviceName)
@@ -1350,7 +1351,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() {
err := cs.Core().Pods(namespace).Delete(execPodName, nil)
Expect(err).NotTo(HaveOccurred())
}()
-execPod, err := f.ClientSet.Core().Pods(namespace).Get(execPodName)
+execPod, err := f.ClientSet.Core().Pods(namespace).Get(execPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.Logf("Waiting up to %v wget %v", kubeProxyLagTimeout, path)
@@ -1485,7 +1486,7 @@ func updateService(c clientset.Interface, namespace, serviceName string, update
var service *v1.Service
var err error
for i := 0; i < 3; i++ {
-service, err = c.Core().Services(namespace).Get(serviceName)
+service, err = c.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
return service, err
}
@@ -1540,7 +1541,7 @@ func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpo
portsByUID := make(PortsByPodUID)
for name, portList := range expectedEndpoints {
-pod, err := c.Core().Pods(ns).Get(name)
+pod, err := c.Core().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil {
framework.Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
}
@@ -1576,7 +1577,7 @@ func validateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin
By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints))
i := 1
for start := time.Now(); time.Since(start) < framework.ServiceStartTimeout; time.Sleep(1 * time.Second) {
-endpoints, err := c.Core().Endpoints(namespace).Get(serviceName)
+endpoints, err := c.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
framework.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
continue
@@ -1641,7 +1642,7 @@ func createExecPodOrFail(client clientset.Interface, ns, generateName string) st
created, err := client.Core().Pods(ns).Create(execPod)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) {
-retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name)
+retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
@@ -1660,7 +1661,7 @@ func createExecPodOnNode(client clientset.Interface, ns, nodeName, generateName
created, err := client.Core().Pods(ns).Create(execPod)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) {
-retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name)
+retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
@@ -1965,7 +1966,7 @@ func startServeHostnameService(c clientset.Interface, internalClient internalcli
}
sort.StringSlice(podNames).Sort()
-service, err := c.Core().Services(ns).Get(name)
+service, err := c.Core().Services(ns).Get(name, metav1.GetOptions{})
if err != nil {
return podNames, "", err
}
@@ -2250,7 +2251,7 @@ func (j *ServiceTestJig) createOnlyLocalLoadBalancerService(namespace, serviceNa
// endpoints of the given Service are running.
func (j *ServiceTestJig) getEndpointNodes(svc *v1.Service) map[string][]string {
nodes := j.getNodes(maxNodesForEndpointsTests)
-endpoints, err := j.Client.Core().Endpoints(svc.Namespace).Get(svc.Name)
+endpoints, err := j.Client.Core().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
}
@@ -2287,7 +2288,7 @@ func (j *ServiceTestJig) getNodes(maxNodesForTest int) (nodes *v1.NodeList) {
func (j *ServiceTestJig) waitForEndpointOnNode(namespace, serviceName, nodeName string) {
err := wait.PollImmediate(framework.Poll, loadBalancerCreateTimeoutDefault, func() (bool, error) {
-endpoints, err := j.Client.Core().Endpoints(namespace).Get(serviceName)
+endpoints, err := j.Client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
framework.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err)
return false, nil
@@ -2349,7 +2350,7 @@ func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceT
// face of timeouts and conflicts.
func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.Service)) (*v1.Service, error) {
for i := 0; i < 3; i++ {
-service, err := j.Client.Core().Services(namespace).Get(name)
+service, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("Failed to get Service %q: %v", name, err)
}
@@ -2403,7 +2404,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeo
var service *v1.Service
framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
pollFunc := func() (bool, error) {
-svc, err := j.Client.Core().Services(namespace).Get(name)
+svc, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -2430,7 +2431,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string
var service *v1.Service
framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name)
pollFunc := func() (bool, error) {
-svc, err := j.Client.Core().Services(namespace).Get(name)
+svc, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -2706,7 +2707,7 @@ func (t *ServiceTestFixture) Cleanup() []error {
for rcName := range t.rcs {
By("stopping RC " + rcName + " in namespace " + t.Namespace)
// First, resize the RC to 0.
-old, err := t.Client.Core().ReplicationControllers(t.Namespace).Get(rcName)
+old, err := t.Client.Core().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{})
if err != nil {
errs = append(errs, err)
}
@@ -2778,7 +2779,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam
err := c.Core().Pods(ns).Delete(execPodName, nil)
Expect(err).NotTo(HaveOccurred())
}()
-execPod, err := f.ClientSet.Core().Pods(ns).Get(execPodName)
+execPod, err := f.ClientSet.Core().Pods(ns).Get(execPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
var stdout string


@@ -22,6 +22,7 @@ import (
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version"
@@ -42,7 +43,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
var secrets []v1.ObjectReference
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
By("waiting for a single token reference")
-sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
+sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Logf("default service account was not found")
return false, nil
@@ -68,7 +69,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
{
By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
-sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
+sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets))
}
@@ -80,7 +81,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
// wait for the referenced secret to be removed, and another one autocreated
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("waiting for a new token reference")
-sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
+sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if err != nil {
framework.Logf("error getting default service account: %v", err)
return false, err
@@ -106,7 +107,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
{
By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
-sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
+sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets))
}
@@ -114,7 +115,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
// delete the reference from the service account
By("deleting the reference to the service account token")
{
-sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
+sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
sa.Secrets = nil
_, updateErr := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Update(sa)
@@ -124,7 +125,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
// wait for another one to be autocreated
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("waiting for a new token to be created and added")
-sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
+sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if err != nil {
framework.Logf("error getting default service account: %v", err)
return false, err
@@ -146,7 +147,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
{
By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
-sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
+sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets))
}
@@ -159,7 +160,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
// Standard get, update retry loop
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("getting the auto-created API token")
-sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
+sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Logf("default service account was not found")
return false, nil
@@ -173,7 +174,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
return false, nil
}
for _, secretRef := range sa.Secrets {
-secret, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Get(secretRef.Name)
+secret, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
continue


@@ -150,7 +150,7 @@ func getZoneCount(c clientset.Interface) (int, error) {
// Find the name of the zone in which the pod is scheduled
func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) {
By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
-node, err := c.Core().Nodes().Get(pod.Spec.NodeName)
+node, err := c.Core().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return getZoneNameForNode(*node)
}


@@ -45,11 +45,11 @@ func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVol
By("checking the claim")
// Get new copy of the claim
-claim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
+claim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
-pv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName)
+pv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Check sizes


@@ -150,7 +150,7 @@ func startVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod))
By("locating the server pod")
-pod, err := podClient.Get(serverPod.Name)
+pod, err := podClient.Get(serverPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err)
By("sleeping a bit to give the server time to start")


@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/pkg/watch"
@@ -163,7 +164,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.
})
framework.ExpectNoError(err)
}
-p, err := f.PodClient().Get(pod.Name)
+p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return p.Status
}


@@ -126,7 +126,7 @@ func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatenc
// getTestNodeInfo fetches the capacity of a node from API server and returns a map of labels.
func getTestNodeInfo(f *framework.Framework, testName string) map[string]string {
nodeName := framework.TestContext.NodeName
-node, err := f.ClientSet.Core().Nodes().Get(nodeName)
+node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
cpu, ok := node.Status.Capacity["cpu"]


@@ -21,6 +21,7 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -66,7 +67,7 @@ func (cc *ConformanceContainer) Delete() error {
}
func (cc *ConformanceContainer) IsReady() (bool, error) {
-pod, err := cc.PodClient.Get(cc.podName)
+pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -74,7 +75,7 @@ func (cc *ConformanceContainer) IsReady() (bool, error) {
}
func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
-pod, err := cc.PodClient.Get(cc.podName)
+pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err != nil {
return v1.PodUnknown, err
}
@@ -82,7 +83,7 @@ func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
}
func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
-pod, err := cc.PodClient.Get(cc.podName)
+pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err != nil {
return v1.ContainerStatus{}, err
}
@@ -94,7 +95,7 @@ func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
}
func (cc *ConformanceContainer) Present() (bool, error) {
-_, err := cc.PodClient.Get(cc.podName)
+_, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err == nil {
return true, nil
}


@@ -22,6 +22,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -125,7 +126,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
// The pod should be evicted.
if !evictionOccurred {
-podData, err := podClient.Get(busyPodName)
+podData, err := podClient.Get(busyPodName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -135,7 +136,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
return err
}
-podData, err = podClient.Get(idlePodName)
+podData, err = podClient.Get(idlePodName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -170,7 +171,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
}
// The new pod should be able to be scheduled and run after the disk pressure is relieved.
-podData, err := podClient.Get(verifyPodName)
+podData, err := podClient.Get(verifyPodName, metav1.GetOptions{})
if err != nil {
return err
}


@@ -22,6 +22,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
docker "k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/test/e2e/framework"
@@ -149,7 +150,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
By("Making sure all containers restart the specified number of times")
Eventually(func() error {
for _, podSpec := range test.testPods {
-updatedPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(podSpec.podName)
+updatedPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(podSpec.podName, metav1.GetOptions{})
if err != nil {
return err
}


@@ -18,6 +18,7 @@ package e2e_node
import (
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/test/e2e/framework"
@@ -51,7 +52,7 @@ var _ = framework.KubeDescribe("ImageID", func() {
framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
-runningPod, err := f.PodClient().Get(pod.Name)
+runningPod, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
status := runningPod.Status


@@ -22,7 +22,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api/v1"
-apiunversioned "k8s.io/kubernetes/pkg/apis/meta/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -56,7 +56,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
},
})
Eventually(func() string {
-sinceTime := apiunversioned.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
+sinceTime := metav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream()
if err != nil {
return ""
@@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
It("should have an error terminated reason", func() {
Eventually(func() error {
-podData, err := podClient.Get(podName)
+podData, err := podClient.Get(podName, metav1.GetOptions{})
if err != nil {
return err
}


@@ -18,6 +18,7 @@ package e2e_node
import (
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/test/e2e/framework"
@@ -67,7 +68,7 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName)
// get containerID from created Pod
-createdLogPod, err := podClient.Get(logPodName)
+createdLogPod, err := podClient.Get(logPodName, metav1.GetOptions{})
logConID := kubecontainer.ParseContainerID(createdLogPod.Status.ContainerStatuses[0].ContainerID)
framework.ExpectNoError(err, "Failed to get pod: %s", logPodName)


@@ -24,6 +24,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@@ -150,15 +151,15 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
By("polling the Status.Phase of each pod and checking for violations of the eviction order.")
Eventually(func() error {
-gteed, gtErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(guaranteed.Name)
+gteed, gtErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(guaranteed.Name, metav1.GetOptions{})
framework.ExpectNoError(gtErr, fmt.Sprintf("getting pod %s", guaranteed.Name))
gteedPh := gteed.Status.Phase
-burst, buErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(burstable.Name)
+burst, buErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(burstable.Name, metav1.GetOptions{})
framework.ExpectNoError(buErr, fmt.Sprintf("getting pod %s", burstable.Name))
burstPh := burst.Status.Phase
-best, beErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(besteffort.Name)
+best, beErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(besteffort.Name, metav1.GetOptions{})
framework.ExpectNoError(beErr, fmt.Sprintf("getting pod %s", besteffort.Name))
bestPh := best.Status.Phase


@@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -57,7 +58,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
})
It("should be updated when static pod updated [Conformance]", func() {
By("get mirror pod uid")
-pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName)
+pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
uid := pod.UID
@@ -72,14 +73,14 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
}, 2*time.Minute, time.Second*4).Should(BeNil())
By("check the mirror pod container image is updated")
-pod, err = f.ClientSet.Core().Pods(ns).Get(mirrorPodName)
+pod, err = f.ClientSet.Core().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(len(pod.Spec.Containers)).Should(Equal(1))
Expect(pod.Spec.Containers[0].Image).Should(Equal(image))
})
It("should be recreated when mirror pod gracefully deleted [Conformance]", func() {
By("get mirror pod uid")
-pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName)
+pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
uid := pod.UID
@@ -94,7 +95,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
})
It("should be recreated when mirror pod forcibly deleted [Conformance]", func() {
By("get mirror pod uid")
-pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName)
+pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
uid := pod.UID
@@ -156,7 +157,7 @@ func deleteStaticPod(dir, name, namespace string) error {
}
func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
-_, err := cl.Core().Pods(namespace).Get(name)
+_, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
@@ -164,7 +165,7 @@ func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) err
}
func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error {
-pod, err := cl.Core().Pods(namespace).Get(name)
+pod, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
}
@@ -175,7 +176,7 @@ func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error
}
func checkMirrorPodRecreatedAndRunnig(cl clientset.Interface, name, namespace string, oUID types.UID) error {
-pod, err := cl.Core().Pods(namespace).Get(name)
+pod, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
}


@@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig"
v1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
// utilconfig "k8s.io/kubernetes/pkg/util/config"
"k8s.io/kubernetes/test/e2e/framework"
@@ -86,7 +87,7 @@ func getCurrentKubeletConfig() (*componentconfig.KubeletConfiguration, error) {
// Queries the API server for a Kubelet configuration for the node described by framework.TestContext.NodeName
func getCurrentKubeletConfigMap(f *framework.Framework) (*v1.ConfigMap, error) {
-return f.ClientSet.Core().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", framework.TestContext.NodeName))
+return f.ClientSet.Core().ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", framework.TestContext.NodeName), metav1.GetOptions{})
}
// Creates or updates the configmap for KubeletConfiguration, waits for the Kubelet to restart


@@ -173,7 +173,7 @@ func TestAtomicPut(t *testing.T) {
go func(l, v string) {
defer wg.Done()
for {
-tmpRC, err := rcs.Get(rc.Name)
+tmpRC, err := rcs.Get(rc.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Error getting atomicRC: %v", err)
continue
@@ -199,7 +199,7 @@ func TestAtomicPut(t *testing.T) {
}(label, value)
}
wg.Wait()
-rc, err = rcs.Get(rc.Name)
+rc, err = rcs.Get(rc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed getting atomicRC after writers are complete: %v", err)
}
@@ -281,7 +281,7 @@ func TestPatch(t *testing.T) {
if err != nil {
t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
}
-pod, err = pods.Get(name)
+pod, err = pods.Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed getting patchpod: %v", err)
}
@@ -294,7 +294,7 @@ func TestPatch(t *testing.T) {
if err != nil {
t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
}
-pod, err = pods.Get(name)
+pod, err = pods.Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed getting patchpod: %v", err)
}
@@ -307,7 +307,7 @@ func TestPatch(t *testing.T) {
if err != nil {
t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
}
-pod, err = pods.Get(name)
+pod, err = pods.Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed getting patchpod: %v", err)
}
@@ -712,7 +712,7 @@ func TestMultiWatch(t *testing.T) {
for i := 0; i < watcherCount; i++ {
go func(i int) {
name := fmt.Sprintf("multi-watch-%v", i)
-pod, err := client.Core().Pods(ns.Name).Get(name)
+pod, err := client.Core().Pods(ns.Name).Get(name, metav1.GetOptions{})
if err != nil {
panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
}


@@ -213,7 +213,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
for i := 0; i < 10; i++ {
podName := fmt.Sprintf(podNameFormat, i)
-_, err := clientSet.Core().Pods(ns.Name).Get(podName)
+_, err := clientSet.Core().Pods(ns.Name).Get(podName, metav1.GetOptions{})
if !errors.IsNotFound(err) {
t.Errorf("Pod %q is expected to be evicted", podName)
}
@@ -243,7 +243,7 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN
func waitPDBStable(t *testing.T, clientSet clientset.Interface, podNum int32, ns, pdbName string) {
if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
-pdb, err := clientSet.Policy().PodDisruptionBudgets(ns).Get(pdbName)
+pdb, err := clientSet.Policy().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{})
if err != nil {
return false, err
}


@@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/apis/batch"
certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
policy "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
@@ -452,7 +453,7 @@ func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interf
if err != nil {
return nil, err
}
-scaled, err := clientset.Core().ReplicationControllers(ns).Get(name)
+scaled, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}


@@ -232,10 +232,10 @@ func TestCascadingDeletion(t *testing.T) {
t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
}
// checks the garbage collect doesn't delete pods it shouldn't delete.
-if _, err := podClient.Get(independentPodName); err != nil {
+if _, err := podClient.Get(independentPodName, metav1.GetOptions{}); err != nil {
t.Fatal(err)
}
-if _, err := podClient.Get(oneValidOwnerPodName); err != nil {
+if _, err := podClient.Get(oneValidOwnerPodName, metav1.GetOptions{}); err != nil {
t.Fatal(err)
}
}


@@ -39,6 +39,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clienttypedv1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/restclient"
@@ -407,7 +408,7 @@ func TestMasterService(t *testing.T) {
}
}
if found {
-ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes")
+ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
return false, nil
}
@@ -451,7 +452,7 @@ func TestServiceAlloc(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
-_, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes")
+_, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return false, err
}
@@ -595,7 +596,7 @@ func TestUpdateNodeObjects(t *testing.T) {
break
}
-n, err := c.Nodes().Get(fmt.Sprintf("node-%d", node))
+n, err := c.Nodes().Get(fmt.Sprintf("node-%d", node), metav1.GetOptions{})
if err != nil {
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
break


@@ -245,7 +245,7 @@ func TestAdoption(t *testing.T) {
 		waitToObservePods(t, podInformer, 1)
 		go rm.Run(5, stopCh)
 		if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
-			updatedPod, err := podClient.Get(pod.Name)
+			updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
 			if err != nil {
 				return false, err
 			}
@@ -280,7 +280,7 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.R
 func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet, ns string) {
 	rsClient := clientSet.Extensions().ReplicaSets(ns)
 	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
-		updatedRS, err := rsClient.Get(rs.Name)
+		updatedRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -371,7 +371,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 		t.Fatal(err)
 	}
 	podClient := clientSet.Core().Pods(ns.Name)
-	pod2, err = podClient.Get(pod2.Name)
+	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod2: %v", err)
 	}
@@ -414,7 +414,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	pod2, err = podClient.Get(pod2.Name)
+	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod2: %v", err)
 	}


@@ -243,7 +243,7 @@ func TestAdoption(t *testing.T) {
 		waitToObservePods(t, podInformer, 1)
 		go rm.Run(5, stopCh)
 		if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
-			updatedPod, err := podClient.Get(pod.Name)
+			updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
 			if err != nil {
 				return false, err
 			}
@@ -278,7 +278,7 @@ func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.Replic
 func waitRCStable(t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController, ns string) {
 	rcClient := clientSet.Core().ReplicationControllers(ns)
 	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
-		updatedRC, err := rcClient.Get(rc.Name)
+		updatedRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -367,7 +367,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 		t.Fatal(err)
 	}
 	podClient := clientSet.Core().Pods(ns.Name)
-	pod2, err = podClient.Get(pod2.Name)
+	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod2: %v", err)
 	}
@@ -409,7 +409,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	pod2, err = podClient.Get(pod2.Name)
+	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod2: %v", err)
 	}


@@ -298,7 +298,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
 		t.Fatalf("Failed to schedule pod: %v", err)
 	}
-	if myPod, err := cs.Core().Pods(ns.Name).Get(myPod.Name); err != nil {
+	if myPod, err := cs.Core().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{}); err != nil {
 		t.Fatalf("Failed to get pod: %v", err)
 	} else if myPod.Spec.NodeName != "machine3" {
 		t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName)


@@ -76,7 +76,7 @@ func TestUnschedulableNodes(t *testing.T) {
 func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
 	return func() (bool, error) {
-		pod, err := c.Core().Pods(podNamespace).Get(podName)
+		pod, err := c.Core().Pods(podNamespace).Get(podName, metav1.GetOptions{})
 		if errors.IsNotFound(err) {
 			return false, nil
 		}
@@ -268,7 +268,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
 		}
 		// Apply the schedulable modification to the node, and wait for the reflection
-		schedNode, err := cs.Core().Nodes().Get(unSchedNode.Name)
+		schedNode, err := cs.Core().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Failed to get node: %v", err)
 		}


@@ -35,6 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/auth/authenticator"
 	"k8s.io/kubernetes/pkg/auth/authenticator/bearertoken"
 	"k8s.io/kubernetes/pkg/auth/authorizer"
@@ -139,7 +140,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
 	}
 	// Trigger creation of a new referenced token
-	serviceAccount, err = c.Core().ServiceAccounts(ns).Get(name)
+	serviceAccount, err = c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -435,13 +436,13 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
 func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*v1.ServiceAccount, error) {
 	if !shouldWait {
-		return c.Core().ServiceAccounts(ns).Get(name)
+		return c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
 	}
 	var user *v1.ServiceAccount
 	var err error
 	err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
-		user, err = c.Core().ServiceAccounts(ns).Get(name)
+		user, err = c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
 		if errors.IsNotFound(err) {
 			return false, nil
 		}
@@ -458,7 +459,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
 	token := ""
 	findToken := func() (bool, error) {
-		user, err := c.Core().ServiceAccounts(ns).Get(name)
+		user, err := c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
 		if errors.IsNotFound(err) {
 			return false, nil
 		}
@@ -467,7 +468,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
 		}
 		for _, ref := range user.Secrets {
-			secret, err := c.Core().Secrets(ns).Get(ref.Name)
+			secret, err := c.Core().Secrets(ns).Get(ref.Name, metav1.GetOptions{})
 			if errors.IsNotFound(err) {
 				continue
 			}


@@ -21,6 +21,7 @@ import (
 	"time"
 	"k8s.io/kubernetes/pkg/api/errors"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -48,7 +49,7 @@ var Code503 = map[int]bool{503: true}
 // WaitForPodToDisappear polls the API server if the pod has been deleted.
 func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, interval, timeout time.Duration) error {
 	return wait.PollImmediate(interval, timeout, func() (bool, error) {
-		_, err := podClient.Get(podName)
+		_, err := podClient.Get(podName, metav1.GetOptions{})
 		if err == nil {
 			return false, nil
 		} else {


@@ -275,7 +275,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 	waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
 	glog.V(2).Infof("TestPersistentVolumeBindRace pvc bound")
-	pv, err = testClient.PersistentVolumes().Get(pv.Name)
+	pv, err = testClient.PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -347,14 +347,14 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
 	waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 	t.Log("claim bound")
-	pv, err := testClient.PersistentVolumes().Get("pv-false")
+	pv, err := testClient.PersistentVolumes().Get("pv-false", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
 	if pv.Spec.ClaimRef != nil {
 		t.Fatalf("False PV shouldn't be bound")
 	}
-	pv, err = testClient.PersistentVolumes().Get("pv-true")
+	pv, err = testClient.PersistentVolumes().Get("pv-true", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -446,14 +446,14 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 	waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 	t.Log("claim bound")
-	pv, err := testClient.PersistentVolumes().Get("pv-false")
+	pv, err := testClient.PersistentVolumes().Get("pv-false", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
 	if pv.Spec.ClaimRef != nil {
 		t.Fatalf("False PV shouldn't be bound")
 	}
-	pv, err = testClient.PersistentVolumes().Get("pv-true")
+	pv, err = testClient.PersistentVolumes().Get("pv-true", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -520,7 +520,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
 	// only one PV is bound
 	bound := 0
 	for i := 0; i < maxPVs; i++ {
-		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
+		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pv: %v", err)
 		}
@@ -614,7 +614,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 			// Modify PV
 			i := rand.Intn(objCount)
 			name := "pv-" + strconv.Itoa(i)
-			pv, err := testClient.PersistentVolumes().Get(name)
+			pv, err := testClient.PersistentVolumes().Get(name, metav1.GetOptions{})
 			if err != nil {
 				// Silently ignore error, the PV may have be already deleted
 				// or not exists yet.
@@ -638,7 +638,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 			// Modify PVC
 			i := rand.Intn(objCount)
 			name := "pvc-" + strconv.Itoa(i)
-			pvc, err := testClient.PersistentVolumeClaims(v1.NamespaceDefault).Get(name)
+			pvc, err := testClient.PersistentVolumeClaims(v1.NamespaceDefault).Get(name, metav1.GetOptions{})
 			if err != nil {
 				// Silently ignore error, the PVC may have be already
 				// deleted or not exists yet.
@@ -693,7 +693,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 	// check that everything is bound to something
 	for i := 0; i < objCount; i++ {
-		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
+		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pv: %v", err)
 		}
@@ -702,7 +702,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 		}
 		glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
-		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name)
+		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pvc: %v", err)
 		}
@@ -822,7 +822,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
 	// check that everything is bound to something
 	for i := 0; i < objCount; i++ {
-		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
+		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pv: %v", err)
 		}
@@ -831,7 +831,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
 		}
 		glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
-		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name)
+		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pvc: %v", err)
 		}
@@ -991,14 +991,14 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 	t.Log("claim bound")
 	// only RWM PV is bound
-	pv, err := testClient.PersistentVolumes().Get("pv-rwo")
+	pv, err := testClient.PersistentVolumes().Get("pv-rwo", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
 	if pv.Spec.ClaimRef != nil {
 		t.Fatalf("ReadWriteOnce PV shouldn't be bound")
 	}
-	pv, err = testClient.PersistentVolumes().Get("pv-rwm")
+	pv, err = testClient.PersistentVolumes().Get("pv-rwm", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -1021,7 +1021,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase v1.PersistentVolumePhase) {
 	// Check if the volume is already in requested phase
-	volume, err := client.Core().PersistentVolumes().Get(pvName)
+	volume, err := client.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
 	if err == nil && volume.Status.Phase == phase {
 		return
 	}
@@ -1042,7 +1042,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w
 func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase v1.PersistentVolumeClaimPhase) {
 	// Check if the claim is already in requested phase
-	claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName)
+	claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
 	if err == nil && claim.Status.Phase == phase {
 		return
 	}


@@ -24,6 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	apierrs "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"github.com/golang/glog"
@@ -61,7 +62,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri
 	var node *v1.Node
 	var err error
 	for attempt := 0; attempt < retries; attempt++ {
-		node, err = c.Core().Nodes().Get(nodeName)
+		node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
@@ -92,7 +93,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri
 // VerifyLabelsRemoved checks if Node for given nodeName does not have any of labels from labelKeys.
 // Return non-nil error if it does.
 func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []string) error {
-	node, err := c.Core().Nodes().Get(nodeName)
+	node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}


@@ -772,7 +772,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
 func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
 	for attempt := 0; attempt < retries; attempt++ {
-		node, err := client.Core().Nodes().Get(nodeName)
+		node, err := client.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 		if err != nil {
 			return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
 		}
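
All of the hunks above apply one mechanical signature change: typed client Get calls now take a metav1.GetOptions value in addition to the object name. As a minimal, self-contained sketch of the new call shape only (the GetOptions, Pod, PodGetter, and fakePodGetter names below are illustrative stand-ins invented for this sketch, not the real client types in this commit):

package main

import "fmt"

// GetOptions is a stand-in for metav1.GetOptions; the real struct carries
// fields such as ResourceVersion.
type GetOptions struct {
	ResourceVersion string
}

// Pod is a stand-in for the API object a typed client returns.
type Pod struct {
	Name string
}

// PodGetter models the updated method signature: Get takes the resource name
// plus an options value instead of the name alone.
type PodGetter interface {
	Get(name string, options GetOptions) (*Pod, error)
}

// fakePodGetter is a trivial in-memory implementation used only for this sketch.
type fakePodGetter map[string]*Pod

func (f fakePodGetter) Get(name string, _ GetOptions) (*Pod, error) {
	if p, ok := f[name]; ok {
		return p, nil
	}
	return nil, fmt.Errorf("pod %q not found", name)
}

func main() {
	var pods PodGetter = fakePodGetter{"test-pod": {Name: "test-pod"}}

	// Old call shape: pods.Get("test-pod")
	// New call shape: an explicit, usually empty, options struct is passed.
	pod, err := pods.Get("test-pod", GetOptions{})
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("got pod:", pod.Name)
}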