mirror of https://github.com/k3s-io/k3s
Merge pull request #59056 from mkumatag/pause_manifest
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Use pause manifest image

**What this PR does / why we need it**: The pause manifest-list image support was merged as part of https://github.com/kubernetes/kubernetes/pull/57723, so it is now time to remove all architecture-dependent pause image names.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*: Fixes #

**Special notes for your reviewer**:

**Release note**:
```
NONE
```
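The heart of the change is easiest to see in the kubelet defaults touched below: the pause image name is no longer suffixed with the build architecture, because `k8s.gcr.io/pause:3.1` is published as a manifest list that the container runtime resolves to the right architecture on pull. A minimal, self-contained sketch of the before/after name construction (constant names follow the diff; the snippet is illustrative, not the upstream file):

```go
package main

import (
	"fmt"
	"runtime"
)

const (
	defaultPodSandboxImageName    = "k8s.gcr.io/pause"
	defaultPodSandboxImageVersion = "3.1"
)

func main() {
	// Before: one tag per architecture, e.g. k8s.gcr.io/pause-amd64:3.1 on amd64 nodes.
	oldImage := defaultPodSandboxImageName + "-" + runtime.GOARCH + ":" + defaultPodSandboxImageVersion
	// After: a single manifest-list tag; the registry serves the per-arch image behind it.
	newImage := defaultPodSandboxImageName + ":" + defaultPodSandboxImageVersion
	fmt.Println(oldImage, newImage)
}
```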
commit
489178d270
@@ -33,8 +33,7 @@ const (

 var (
 	defaultPodSandboxImage = defaultPodSandboxImageName +
-		"-" + runtime.GOARCH + ":" +
-		defaultPodSandboxImageVersion
+		":" + defaultPodSandboxImageVersion
 )

 // NewContainerRuntimeOptions will create a new ContainerRuntimeOptions with
@@ -720,9 +720,9 @@ run_pod_tests() {
   kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
   ## Patch pod from JSON can change image
   # Command
-  kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "k8s.gcr.io/pause-amd64:3.1"}]}}'
+  kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "k8s.gcr.io/pause:3.1"}]}}'
   # Post-condition: valid-pod POD has expected image
-  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause-amd64:3.1:'
+  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause:3.1:'

   ## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected
   ERROR_FILE="${KUBE_TEMP}/conflict-error"
@@ -9,7 +9,7 @@
     "containers": [
       {
         "name": "kubernetes-pause",
-        "image": "k8s.gcr.io/pause-amd64:3.1"
+        "image": "k8s.gcr.io/pause:3.1"
       }
     ],
     "restartPolicy": "Never",
@@ -37,7 +37,7 @@ import (
 )

 const (
-	defaultSandboxImage = "k8s.gcr.io/pause-amd64:3.1"
+	defaultSandboxImage = "k8s.gcr.io/pause:3.1"

 	// Various default sandbox resources requests/limits.
 	defaultSandboxCPUshares int64 = 2
@@ -26,6 +26,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/apps"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -47,7 +48,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
 		Client:    f.ClientSet,
 		Name:      "baz",
 		Namespace: f.Namespace.Name,
-		Image:     framework.GetPauseImageName(f.ClientSet),
+		Image:     imageutils.GetPauseImageName(),
 		Replicas:  1,
 	})).NotTo(HaveOccurred())
 })
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -114,7 +115,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 			Containers: []v1.Container{
 				{
 					Name:  "nginx",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 		},
@@ -135,7 +136,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 			Containers: []v1.Container{
 				{
 					Name:  "nginx",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 		},
@@ -545,7 +545,7 @@ func toBeMutatedPod(f *framework.Framework) *v1.Pod {
 			Containers: []v1.Container{
 				{
 					Name:  "example",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 		},
@@ -902,7 +902,7 @@ func nonCompliantPod(f *framework.Framework) *v1.Pod {
 			Containers: []v1.Container{
 				{
 					Name:  "webhook-disallow",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 		},
@@ -921,7 +921,7 @@ func hangingPod(f *framework.Framework) *v1.Pod {
 			Containers: []v1.Container{
 				{
 					Name:  "wait-forever",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 		},
@@ -34,6 +34,7 @@ import (
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -202,7 +203,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 		InternalClient: f.InternalClientset,
 		Name:           rcName,
 		Namespace:      ns,
-		Image:          framework.GetPauseImageName(f.ClientSet),
+		Image:          imageutils.GetPauseImageName(),
 		Replicas:       numPods,
 		CreatedPods:    &[]*v1.Pod{},
 	}
@@ -80,7 +80,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
 			Spec: apiv1.PodSpec{
 				Containers: []apiv1.Container{{
 					Name:  "pause",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 				}},
 			},
 		}
@@ -27,6 +27,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/test/e2e/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -128,7 +129,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 			Containers: []v1.Container{
 				{
 					Name:  "pause",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 			NodeName: nodeName,
@@ -35,6 +35,7 @@ import (
 	utilpointer "k8s.io/kubernetes/pkg/util/pointer"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -322,7 +323,7 @@ func restrictedPod(f *framework.Framework, name string) *v1.Pod {
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{{
 				Name:  "pause",
-				Image: framework.GetPauseImageName(f.ClientSet),
+				Image: imageutils.GetPauseImageName(),
 				SecurityContext: &v1.SecurityContext{
 					AllowPrivilegeEscalation: boolPtr(false),
 					RunAsUser:                utilpointer.Int64Ptr(65534),
@@ -24,6 +24,7 @@ go_library(
         "//test/e2e/instrumentation/monitoring:go_default_library",
         "//test/e2e/scheduling:go_default_library",
         "//test/utils:go_default_library",
+        "//test/utils/image:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
@@ -31,6 +31,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/golang/glog"
 	. "github.com/onsi/ginkgo"
@@ -432,7 +433,7 @@ func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabyte
 		Name:       id,
 		Namespace:  f.Namespace.Name,
 		Timeout:    timeout,
-		Image:      framework.GetPauseImageName(f.ClientSet),
+		Image:      imageutils.GetPauseImageName(),
 		Replicas:   replicas,
 		MemRequest: int64(1024 * 1024 * megabytes / replicas),
 	}
@@ -492,7 +493,7 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
 		Name:       id,
 		Namespace:  f.Namespace.Name,
 		Timeout:    timeout,
-		Image:      framework.GetPauseImageName(f.ClientSet),
+		Image:      imageutils.GetPauseImageName(),
 		Replicas:   replicas,
 		HostPorts:  map[string]int{"port1": port},
 		MemRequest: request,
@@ -45,6 +45,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/scheduling"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/golang/glog"
 	. "github.com/onsi/ginkgo"
@@ -1395,7 +1396,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
 		Name:         id,
 		Namespace:    f.Namespace.Name,
 		Timeout:      timeout,
-		Image:        framework.GetPauseImageName(f.ClientSet),
+		Image:        imageutils.GetPauseImageName(),
 		Replicas:     replicas,
 		MemRequest:   request,
 		NodeSelector: selector,
@@ -1633,7 +1634,7 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id
 		Name:      id,
 		Namespace: namespace,
 		Timeout:   scaleUpTimeout,
-		Image:     framework.GetPauseImageName(f.ClientSet),
+		Image:     imageutils.GetPauseImageName(),
 		Replicas:  pods,
 		Labels:    podLabels,
 	}
@@ -1657,7 +1658,7 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in
 		Name:      id,
 		Namespace: namespace,
 		Timeout:   scaleUpTimeout,
-		Image:     framework.GetPauseImageName(f.ClientSet),
+		Image:     imageutils.GetPauseImageName(),
 		Replicas:  pods,
 		Labels:    podLabels,
 	}
@@ -1738,7 +1739,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
 		Name:       id,
 		Namespace:  namespace,
 		Timeout:    defaultTimeout,
-		Image:      framework.GetPauseImageName(f.ClientSet),
+		Image:      imageutils.GetPauseImageName(),
 		Replicas:   0,
 		Labels:     labels,
 		MemRequest: memRequest,
@@ -29,6 +29,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/client/conditions"
 	"k8s.io/kubernetes/test/e2e/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -130,7 +131,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
 			Containers: []v1.Container{
 				{
 					Name:  "run1",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 					Resources: v1.ResourceRequirements{
 						Limits: v1.ResourceList{
 							v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@@ -194,7 +195,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
 			Containers: []v1.Container{
 				{
 					Name:  "run1",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 					Resources: v1.ResourceRequirements{
 						Limits: v1.ResourceList{
 							v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 			Containers: []v1.Container{
 				{
 					Name:  "test",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 		},
@@ -3493,7 +3493,7 @@ func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]s
 			Containers: []v1.Container{
 				{
 					Name:  "pause",
-					Image: GetPauseImageName(c),
+					Image: imageutils.GetPauseImageName(),
 					Ports: containerPorts,
 					// Add a dummy environment variable to work around a docker issue.
 					// https://github.com/docker/docker/issues/14203
@@ -32,6 +32,7 @@ import (
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 )
@@ -127,7 +128,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int) (output
 	cfg := testutils.RCConfig{
 		Client:         f.ClientSet,
 		InternalClient: f.InternalClientset,
-		Image:          framework.GetPauseImageName(f.ClientSet),
+		Image:          imageutils.GetPauseImageName(),
 		Name:           "svc-latency-rc",
 		Namespace:      f.Namespace.Name,
 		Replicas:       1,
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -319,7 +320,7 @@ var _ = SIGDescribe("kubelet", func() {
 			InternalClient: f.InternalClientset,
 			Name:           rcName,
 			Namespace:      f.Namespace.Name,
-			Image:          framework.GetPauseImageName(f.ClientSet),
+			Image:          imageutils.GetPauseImageName(),
 			Replicas:       totalPods,
 			NodeSelector:   nodeLabels,
 		})).NotTo(HaveOccurred())
@@ -28,6 +28,7 @@ import (
 	stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -74,7 +75,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 		InternalClient: f.InternalClientset,
 		Name:           rcName,
 		Namespace:      f.Namespace.Name,
-		Image:          framework.GetPauseImageName(f.ClientSet),
+		Image:          imageutils.GetPauseImageName(),
 		Replicas:       totalPods,
 	})).NotTo(HaveOccurred())

@@ -19,6 +19,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/timer:go_default_library",
         "//test/utils:go_default_library",
+        "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
@@ -45,6 +45,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/timer"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -586,7 +587,7 @@ var _ = SIGDescribe("Density", func() {
 			Client:         clients[i],
 			InternalClient: internalClients[i],
 			ScalesGetter:   scalesClients[i],
-			Image:          framework.GetPauseImageName(f.ClientSet),
+			Image:          imageutils.GetPauseImageName(),
 			Name:           name,
 			Namespace:      nsName,
 			Labels:         map[string]string{"type": "densityPod"},
@@ -744,7 +745,7 @@ var _ = SIGDescribe("Density", func() {
 			name := additionalPodsPrefix + "-" + strconv.Itoa(i)
 			nsName := namespaces[i%len(namespaces)].Name
 			rcNameToNsMap[name] = nsName
-			go createRunningPodFromRC(&wg, c, name, nsName, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest)
+			go createRunningPodFromRC(&wg, c, name, nsName, imageutils.GetPauseImageName(), additionalPodsPrefix, cpuRequest, memRequest)
 			time.Sleep(200 * time.Millisecond)
 		}
 		wg.Wait()
@@ -22,6 +22,7 @@ import (
 	"k8s.io/api/core/v1"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 )
@@ -42,7 +43,7 @@ var _ = SIGDescribe("Empty [Feature:Empty]", func() {
 	})

 	It("starts a pod", func() {
-		configs, _, _ := GenerateConfigsForGroup([]*v1.Namespace{f.Namespace}, "empty-pod", 1, 1, framework.GetPauseImageName(f.ClientSet), []string{}, api.Kind("ReplicationController"), 0, 0)
+		configs, _, _ := GenerateConfigsForGroup([]*v1.Namespace{f.Namespace}, "empty-pod", 1, 1, imageutils.GetPauseImageName(), []string{}, api.Kind("ReplicationController"), 0, 0)
 		if len(configs) != 1 {
 			framework.Failf("generateConfigs should have generated single config")
 		}
|
|||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -139,7 +140,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
|
|||
},
|
||||
},
|
||||
}
|
||||
rc := getRCWithInterPodAffinity(affinityRCName, labelsMap, replica, affinity, framework.GetPauseImageName(f.ClientSet))
|
||||
rc := getRCWithInterPodAffinity(affinityRCName, labelsMap, replica, affinity, imageutils.GetPauseImageName())
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, affinityRCName)
|
||||
|
||||
// RC should be running successfully
|
||||
|
@ -216,7 +217,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
|
|||
},
|
||||
}
|
||||
rc := getRCWithInterPodAffinityNodeSelector(labelRCName, labelsMap, replica, affinity,
|
||||
framework.GetPauseImageName(f.ClientSet), map[string]string{k: v})
|
||||
imageutils.GetPauseImageName(), map[string]string{k: v})
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, labelRCName)
|
||||
|
||||
WaitForSchedulerAfterAction(f, func() error {
|
||||
|
@ -273,7 +274,7 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
|
|||
Name: id,
|
||||
Namespace: f.Namespace.Name,
|
||||
Timeout: defaultTimeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: replicas,
|
||||
HostPorts: map[string]int{"port1": 4321},
|
||||
NodeSelector: nodeSelector,
|
||||
|
|
|
@@ -32,6 +32,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -658,7 +659,7 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
 			Containers: []v1.Container{
 				{
 					Name:  conf.Name,
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 					Ports: conf.Ports,
 				},
 			},
@@ -823,7 +824,7 @@ func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectR
 		Name:      id,
 		Namespace: f.Namespace.Name,
 		Timeout:   defaultTimeout,
-		Image:     framework.GetPauseImageName(f.ClientSet),
+		Image:     imageutils.GetPauseImageName(),
 		Replicas:  replicas,
 		HostPorts: map[string]int{"port1": 4321},
 	}
@@ -35,6 +35,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 type Resource struct {
@@ -376,7 +377,7 @@ func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string,
 			Containers: []v1.Container{
 				{
 					Name:      rsName,
-					Image:     framework.GetPauseImageName(f.ClientSet),
+					Image:     imageutils.GetPauseImageName(),
 					Resources: *resource,
 				},
 			},
@@ -26,6 +26,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -124,7 +125,7 @@ func reserveCpu(f *framework.Framework, id string, replicas, millicores int) {
 		Name:       id,
 		Namespace:  f.Namespace.Name,
 		Timeout:    defaultTimeout,
-		Image:      framework.GetPauseImageName(f.ClientSet),
+		Image:      imageutils.GetPauseImageName(),
 		Replicas:   replicas,
 		CpuRequest: request,
 	}
@@ -30,6 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/quota/evaluator/core"
 	"k8s.io/kubernetes/test/e2e/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -798,7 +799,7 @@ func newTestPodForQuota(f *framework.Framework, name string, requests v1.Resourc
 			Containers: []v1.Container{
 				{
 					Name:  "pause",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 					Resources: v1.ResourceRequirements{
 						Requests: requests,
 						Limits:   limits,
@@ -32,6 +32,7 @@ import (
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 var _ = SIGDescribe("Multi-AZ Clusters", func() {
@@ -91,7 +92,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 			Containers: []v1.Container{
 				{
 					Name:  "test",
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 		},
@@ -131,7 +131,7 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
 			Containers: []v1.Container{
 				{
 					Name:      "container",
-					Image:     imageutils.GetPauseImageNameForHostArch(),
+					Image:     imageutils.GetPauseImageName(),
 					Resources: resources,
 				},
 			},
@@ -332,7 +332,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	)

 	// create test pod data structure
-	pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageNameForHostArch(), podType)
+	pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType)

 	// the controller watches the change of pod status
 	controller := newInformerWatchPod(f, mutex, watchTimes, podType)
@@ -413,8 +413,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 		podType               = "density_test_pod"
 		sleepBeforeCreatePods = 30 * time.Second
 	)
-	bgPods := newTestPods(testArg.bgPodsNr, true, imageutils.GetPauseImageNameForHostArch(), "background_pod")
-	testPods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageNameForHostArch(), podType)
+	bgPods := newTestPods(testArg.bgPodsNr, true, imageutils.GetPauseImageName(), "background_pod")
+	testPods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType)

 	By("Creating a batch of background pods")

@@ -33,6 +33,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 const (
@@ -155,7 +156,7 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{
 				{
-					Image: framework.GetPauseImageName(f.ClientSet),
+					Image: imageutils.GetPauseImageName(),
 					Name:  "pause-container",
 				},
 			},
@@ -462,7 +462,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 			RestartPolicy: v1.RestartPolicyNever,
 			Containers: []v1.Container{
 				{
-					Image: imageutils.GetPauseImageNameForHostArch(),
+					Image: imageutils.GetPauseImageName(),
 					Name:  podName,
 				},
 			},
@@ -51,7 +51,7 @@ var NodeImageWhiteList = sets.NewString(
 	imageutils.GetE2EImage(imageutils.ServeHostname),
 	imageutils.GetE2EImage(imageutils.Netexec),
 	imageutils.GetE2EImage(imageutils.Nonewprivs),
-	imageutils.GetPauseImageNameForHostArch(),
+	imageutils.GetPauseImageName(),
 	framework.GetGPUDevicePluginImage(),
 )

@@ -116,7 +116,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 				},
 			},
 		}
-		podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageNameForHostArch(), lifecycle)
+		podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle)
 		testPodWithHook(podWithHook)
 	})
 	framework.ConformanceIt("should execute prestop http hook properly", func() {
@@ -129,7 +129,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 				},
 			},
 		}
-		podWithHook := getPodWithHook("pod-with-prestop-http-hook", imageutils.GetPauseImageNameForHostArch(), lifecycle)
+		podWithHook := getPodWithHook("pod-with-prestop-http-hook", imageutils.GetPauseImageName(), lifecycle)
 		testPodWithHook(podWithHook)
 	})
 })
@@ -64,7 +64,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 		uid := pod.UID

 		By("update the static pod container image")
-		image := imageutils.GetPauseImageNameForHostArch()
+		image := imageutils.GetPauseImageName()
 		err = createStaticPod(podPath, staticPodName, ns, image, v1.RestartPolicyAlways)
 		Expect(err).ShouldNot(HaveOccurred())

@@ -25,6 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/test/e2e/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/golang/glog"
 	. "github.com/onsi/ginkgo"
@@ -188,7 +189,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 			Spec: v1.PodSpec{
 				Containers: []v1.Container{
 					{
-						Image:     framework.GetPauseImageName(f.ClientSet),
+						Image:     imageutils.GetPauseImageName(),
 						Name:      "container" + string(uuid.NewUUID()),
 						Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
 					},
@@ -232,7 +233,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 			Spec: v1.PodSpec{
 				Containers: []v1.Container{
 					{
-						Image:     framework.GetPauseImageName(f.ClientSet),
+						Image:     imageutils.GetPauseImageName(),
 						Name:      "container" + string(uuid.NewUUID()),
 						Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
 					},
@@ -276,7 +277,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 			Spec: v1.PodSpec{
 				Containers: []v1.Container{
 					{
-						Image:     framework.GetPauseImageName(f.ClientSet),
+						Image:     imageutils.GetPauseImageName(),
 						Name:      "container" + string(uuid.NewUUID()),
 						Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
 					},
@@ -143,7 +143,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
 		// sleep for an interval here to measure steady data
 		sleepAfterCreatePods = 10 * time.Second
 	)
-	pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageNameForHostArch(), "test_pod")
+	pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), "test_pod")

 	rc.Start()
 	// Explicitly delete pods to prevent namespace controller cleanning up timeout
@@ -81,7 +81,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 	Context("Network", func() {
 		It("should recover from ip leak", func() {

-			pods := newTestPods(podCount, false, imageutils.GetPauseImageNameForHostArch(), "restart-container-runtime-test")
+			pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
 			By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
 			createBatchPodWithRateControl(f, pods, podCreationInterval)
 			defer deletePodsSync(f, pods)
@@ -43,7 +43,7 @@ var _ = framework.KubeDescribe("SimpleMount", func() {
 			Containers: []v1.Container{
 				{
 					Name:  "simple-mount-container",
-					Image: imageutils.GetPauseImageNameForHostArch(),
+					Image: imageutils.GetPauseImageName(),
 					VolumeMounts: []v1.VolumeMount{
 						{
 							Name: "simply-mounted-volume",
@@ -17,7 +17,7 @@
   "spec": {
     "containers": [{
       "name": "test-container",
-      "image": "k8s.gcr.io/pause-amd64:3.1"
+      "image": "k8s.gcr.io/pause:3.1"
     }]
   }
 }
@@ -18,8 +18,8 @@ go_test(
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/api/testapi:go_default_library",
         "//pkg/version:go_default_library",
-        "//test/e2e/framework:go_default_library",
         "//test/integration/framework:go_default_library",
+        "//test/utils/image:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
@@ -41,8 +41,8 @@ import (
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/version"
-	e2e "k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/integration/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 func TestClient(t *testing.T) {
@@ -612,7 +612,7 @@ func TestMultiWatch(t *testing.T) {
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{{
 				Name:  "pause",
-				Image: e2e.GetPauseImageName(client),
+				Image: imageutils.GetPauseImageName(),
 			}},
 		},
 	})
@@ -718,7 +718,7 @@ func TestMultiWatch(t *testing.T) {
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{{
 				Name:  "nothing",
-				Image: e2e.GetPauseImageName(client),
+				Image: imageutils.GetPauseImageName(),
 			}},
 		},
 	})
@@ -749,7 +749,7 @@ func TestMultiWatch(t *testing.T) {
 		if err != nil {
 			panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
 		}
-		pod.Spec.Containers[0].Image = e2e.GetPauseImageName(client)
+		pod.Spec.Containers[0].Image = imageutils.GetPauseImageName()
 		sentTimes <- timePair{time.Now(), name}
 		if _, err := client.Core().Pods(ns.Name).Update(pod); err != nil {
 			panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
@@ -20,12 +20,10 @@ package framework

 import (
 	"net/http/httptest"
-	"strings"
 	"testing"

 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clientset "k8s.io/client-go/kubernetes"
 )

 const (
@@ -35,27 +33,6 @@ const (
 	currentPodInfraContainerImageVersion = "3.1"
 )

-// GetServerArchitecture fetches the architecture of the cluster's apiserver.
-func GetServerArchitecture(c clientset.Interface) string {
-	arch := ""
-	sVer, err := c.Discovery().ServerVersion()
-	if err != nil || sVer.Platform == "" {
-		// If we failed to get the server version for some reason, default to amd64.
-		arch = "amd64"
-	} else {
-		// Split the platform string into OS and Arch separately.
-		// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
-		osArchArray := strings.Split(sVer.Platform, "/")
-		arch = osArchArray[1]
-	}
-	return arch
-}
-
-// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
-func GetPauseImageName(c clientset.Interface) string {
-	return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
-}
-
 func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *v1.Namespace {
 	// TODO: Create a namespace with a given basename.
 	// Currently we neither create the namespace nor delete all of its contents at the end.
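With a manifest-list image there is nothing architecture-specific left to compute, which is why the helper deleted above needs no replacement: callers simply switch to the constant-returning `imageutils.GetPauseImageName()`. A self-contained sketch of the difference in shape (the `platform` parameter stands in for the apiserver's `ServerVersion().Platform`; function names here are illustrative, not upstream API):

```go
package main

import (
	"fmt"
	"strings"
)

// pauseImageForPlatform mirrors the deleted helper: it had to split an
// "os/arch" platform string and build an arch-suffixed tag.
func pauseImageForPlatform(platform string) string {
	arch := "amd64" // fallback, as in the deleted code
	if parts := strings.Split(platform, "/"); len(parts) == 2 {
		arch = parts[1]
	}
	return "k8s.gcr.io/pause-" + arch + ":3.1"
}

// pauseImage mirrors the replacement: one manifest-list tag for every node.
func pauseImage() string {
	return "k8s.gcr.io/pause:3.1"
}

func main() {
	fmt.Println(pauseImageForPlatform("linux/arm64")) // k8s.gcr.io/pause-arm64:3.1
	fmt.Println(pauseImage())                         // k8s.gcr.io/pause:3.1
}
```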
@@ -40,9 +40,9 @@ go_test(
        "//pkg/scheduler/schedulercache:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
-        "//test/e2e/framework:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
+        "//test/utils/image:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
@@ -93,6 +93,7 @@ go_library(
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//test/integration/framework:go_default_library",
+        "//test/utils/image:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -35,7 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-	e2e "k8s.io/kubernetes/test/e2e/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 const (
@@ -390,7 +390,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
 			Containers: []v1.Container{
 				{
 					Name:  "container",
-					Image: e2e.GetPauseImageName(cs),
+					Image: imageutils.GetPauseImageName(),
 					Resources: v1.ResourceRequirements{
 						Limits: v1.ResourceList{
 							extendedResourceName: *resource.NewQuantity(1, resource.DecimalSI),
@@ -24,8 +24,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/kubernetes/test/integration/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 // This file tests the scheduler predicates functionality.
@@ -75,7 +75,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel2,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -108,7 +108,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel2,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -140,7 +140,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel2,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -167,7 +167,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				NodeName:   nodes[0].Name,
 			},
 		},
@@ -183,7 +183,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel2,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -205,7 +205,7 @@ func TestInterPodAffinity(t *testing.T) {
 				},
 			},
 			pods: []*v1.Pod{{Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				NodeName:   nodes[0].Name},
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename2",
@@ -221,7 +221,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel2,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -244,7 +244,7 @@ func TestInterPodAffinity(t *testing.T) {
 				},
 			},
 			pods: []*v1.Pod{{Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				NodeName:   nodes[0].Name},
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename2",
@@ -260,7 +260,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -282,7 +282,7 @@ func TestInterPodAffinity(t *testing.T) {
 				},
 			},
 			pods: []*v1.Pod{{Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				NodeName:   nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
 				Name:   "fakename2",
 				Labels: podLabel}}},
@@ -297,7 +297,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel2,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -336,7 +336,7 @@ func TestInterPodAffinity(t *testing.T) {
 				},
 			},
 			pods: []*v1.Pod{{Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				NodeName:   nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
 				Name:   "fakename2",
 				Labels: podLabel}}},
@@ -351,7 +351,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Name: "fakename",
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -390,7 +390,7 @@ func TestInterPodAffinity(t *testing.T) {
 				},
 			},
 			pods: []*v1.Pod{{Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				NodeName:   nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
 				Name:   "fakename2",
 				Labels: podLabel}}},
@@ -405,7 +405,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel2,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -443,7 +443,7 @@ func TestInterPodAffinity(t *testing.T) {
 				},
 			},
 			pods: []*v1.Pod{{Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				NodeName:   nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
 				Name:   "fakename2",
 				Labels: podLabel}}},
@@ -458,7 +458,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel2,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -498,7 +498,7 @@ func TestInterPodAffinity(t *testing.T) {
 			pods: []*v1.Pod{
 				{
 					Spec: v1.PodSpec{
-						Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+						Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 						NodeName:   nodes[0].Name,
 						Affinity: &v1.Affinity{
 							PodAntiAffinity: &v1.PodAntiAffinity{
@@ -535,7 +535,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel2,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -573,7 +573,7 @@ func TestInterPodAffinity(t *testing.T) {
 				},
 			},
 			pods: []*v1.Pod{{Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				NodeName:   nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{
 				Name:   "fakename2",
 				Labels: podLabel}}},
@@ -588,7 +588,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -629,7 +629,7 @@ func TestInterPodAffinity(t *testing.T) {
 				{
 					Spec: v1.PodSpec{
 						NodeName: nodes[0].Name,
-						Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+						Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 						Affinity: &v1.Affinity{
 							PodAntiAffinity: &v1.PodAntiAffinity{
 								RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -665,7 +665,7 @@ func TestInterPodAffinity(t *testing.T) {
 				Labels: podLabel,
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				Affinity: &v1.Affinity{
 					PodAffinity: &v1.PodAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -687,7 +687,7 @@ func TestInterPodAffinity(t *testing.T) {
 				},
 			},
 			pods: []*v1.Pod{{Spec: v1.PodSpec{
-				Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+				Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 				NodeName:   "machine2"}, ObjectMeta: metav1.ObjectMeta{
 				Name:   "fakename2",
 				Labels: podLabel}}},
@@ -701,12 +701,12 @@ func TestInterPodAffinity(t *testing.T) {
 				Name:   "fakename",
 				Labels: podLabel,
 			},
-			Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}}},
+			Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}}},
 		},
 		pods: []*v1.Pod{
 			{
 				Spec: v1.PodSpec{NodeName: nodes[0].Name,
-					Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+					Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 					Affinity: &v1.Affinity{
 						PodAntiAffinity: &v1.PodAntiAffinity{
 							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -741,12 +741,12 @@ func TestInterPodAffinity(t *testing.T) {
 				Name:   "fake-name",
 				Labels: podLabel,
 			},
-			Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}}},
+			Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}}},
 		},
 		pods: []*v1.Pod{
 			{
 				Spec: v1.PodSpec{NodeName: nodes[0].Name,
-					Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+					Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 					Affinity: &v1.Affinity{
 						PodAntiAffinity: &v1.PodAntiAffinity{
 							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
@@ -779,7 +779,7 @@ func TestInterPodAffinity(t *testing.T) {
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: "fake-name2"},
 				Spec: v1.PodSpec{
-					Containers:   []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+					Containers:   []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 					NodeSelector: map[string]string{"region": "r1"},
 					Affinity: &v1.Affinity{
 						PodAntiAffinity: &v1.PodAntiAffinity{
@@ -803,7 +803,7 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			pods: []*v1.Pod{
 				{Spec: v1.PodSpec{
-					Containers: []v1.Container{{Name: "container", Image: framework.GetPauseImageName(cs)}},
+					Containers: []v1.Container{{Name: "container", Image: imageutils.GetPauseImageName()}},
 					NodeName:   nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{Name: "fakename", Labels: map[string]string{"foo": "abc"}}},
 			},
 			fits: false,
@@ -47,6 +47,7 @@ import (
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 	"k8s.io/kubernetes/test/integration/framework"
+	imageutils "k8s.io/kubernetes/test/utils/image"

 	"net/http/httptest"
 )
@@ -319,7 +320,7 @@ func initPausePod(cs clientset.Interface, conf *pausePodConfig) *v1.Pod {
 			Containers: []v1.Container{
 				{
 					Name:  conf.Name,
-					Image: framework.GetPauseImageName(cs),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 			Tolerations: conf.Tolerations,
@@ -84,7 +84,7 @@ var (
 	NoSnatTestProxy = ImageConfig{e2eRegistry, "no-snat-test-proxy", "1.0", true}
 	NWayHTTP        = ImageConfig{e2eRegistry, "n-way-http", "1.0", true}
 	// When these values are updated, also update cmd/kubelet/app/options/container_runtime.go
-	Pause             = ImageConfig{gcRegistry, "pause", "3.1", true}
+	Pause             = ImageConfig{gcRegistry, "pause", "3.1", false}
 	Porter            = ImageConfig{e2eRegistry, "porter", "1.0", true}
 	PortForwardTester = ImageConfig{e2eRegistry, "port-forward-tester", "1.0", true}
 	Redis             = ImageConfig{e2eRegistry, "redis", "1.0", true}
@@ -112,7 +112,7 @@ func GetE2EImageWithArch(image ImageConfig, arch string) string {
 	}
 }

-// GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on.
-func GetPauseImageNameForHostArch() string {
+// GetPauseImageName returns the pause image name with proper version
+func GetPauseImageName() string {
 	return GetE2EImage(Pause)
 }
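After this change every test call site builds its pause container the same way. A sketch of a typical converted call site (the wrapper function and pod name are illustrative; the imports are the real packages used throughout the diff, so this only builds inside the kubernetes tree or with its test utilities vendored):

```go
package example

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// newPausePod returns a minimal pod that runs the pause image; with the
// manifest-list tag the same spec works on any supported architecture
// without per-arch switching.
func newPausePod(name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "pause",
				Image: imageutils.GetPauseImageName(), // resolves to k8s.gcr.io/pause:3.1
			}},
		},
	}
}
```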