/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduling

import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
	utilversion "k8s.io/kubernetes/pkg/util/version"
	"k8s.io/kubernetes/test/e2e/common"
	"k8s.io/kubernetes/test/e2e/framework"
	testutils "k8s.io/kubernetes/test/utils"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	_ "github.com/stretchr/testify/assert"
)

const maxNumberOfPods int64 = 10

var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")

// variable set in BeforeEach, never modified afterwards
var masterNodes sets.String

type pausePodConfig struct {
	Name                              string
	Affinity                          *v1.Affinity
	Annotations, Labels, NodeSelector map[string]string
	Resources                         *v1.ResourceRequirements
	Tolerations                       []v1.Toleration
	NodeName                          string
	Ports                             []v1.ContainerPort
	OwnerReferences                   []metav1.OwnerReference
	PriorityClassName                 string
}

var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
	var cs clientset.Interface
	var nodeList *v1.NodeList
	var systemPodsNo int
	var totalPodCapacity int64
	var RCName string
	var ns string
	f := framework.NewDefaultFramework("sched-pred")
	ignoreLabels := framework.ImagePullerLabels

	AfterEach(func() {
		rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
		if err == nil && *(rc.Spec.Replicas) != 0 {
			By("Cleaning up the replication controller")
			err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, RCName)
			framework.ExpectNoError(err)
		}
	})

	BeforeEach(func() {
		cs = f.ClientSet
		ns = f.Namespace.Name
		nodeList = &v1.NodeList{}

		framework.WaitForAllNodesHealthy(cs, time.Minute)
		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)

		err := framework.CheckTestingNSDeletedExcept(cs, ns)
		framework.ExpectNoError(err)

		// Every test case in this suite assumes that cluster add-on pods stay stable and
		// cannot be run in parallel with any other test that touches Nodes or Pods.
		// This is because we need precise control over what's running in the cluster.
		systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
		Expect(err).NotTo(HaveOccurred())
		systemPodsNo = 0
		for _, pod := range systemPods {
			if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
				systemPodsNo++
			}
		}

		err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
		Expect(err).NotTo(HaveOccurred())

		err = framework.WaitForPodsSuccess(cs, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
		Expect(err).NotTo(HaveOccurred())

		for _, node := range nodeList.Items {
			framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
			framework.PrintAllKubeletPods(cs, node.Name)
		}
	})

	// This test verifies that the max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
	// and cannot be run in parallel with any other test that touches Nodes or Pods. This is because, to check
	// whether max-pods is working, we need to fully saturate the cluster and keep it in that state for a few seconds.
	//
	// Slow PR #13315 (8 min)
	It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() {
		totalPodCapacity = 0

		for _, node := range nodeList.Items {
			framework.Logf("Node: %v", node)
			podCapacity, found := node.Status.Capacity[v1.ResourcePods]
			Expect(found).To(Equal(true))
			totalPodCapacity += podCapacity.Value()
		}

		currentlyScheduledPods := framework.WaitForStableCluster(cs, masterNodes)
		podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods

		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))

		// As the pods are distributed randomly among nodes,
		// it can easily happen that all nodes are saturated
		// and there is no need to create additional pods.
		// StartPods requires at least one pod to replicate.
		if podsNeededForSaturation > 0 {
			framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "maxp",
				*initPausePod(f, pausePodConfig{
					Name:   "",
					Labels: map[string]string{"name": ""},
				}), true, framework.Logf))
		}

		podName := "additional-pod"
		WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "additional"},
		}), podName, false)
		verifyResult(cs, podsNeededForSaturation, 1, ns)
	})

	// This test verifies we don't allow scheduling of pods in a way that the sum of
	// local ephemeral storage limits of pods is greater than the machine's capacity.
	// It assumes that cluster add-on pods stay stable and cannot be run in parallel
	// with any other test that touches Nodes or Pods.
	// This is because we need precise control over what's running in the cluster.
	It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func() {
		framework.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery())

		nodeMaxAllocatable := int64(0)

		nodeToAllocatableMap := make(map[string]int64)
		for _, node := range nodeList.Items {
			allocatable, found := node.Status.Allocatable[v1.ResourceEphemeralStorage]
			Expect(found).To(Equal(true))
			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
			if nodeMaxAllocatable < allocatable.MilliValue() {
				nodeMaxAllocatable = allocatable.MilliValue()
			}
		}

		framework.WaitForStableCluster(cs, masterNodes)

		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
		framework.ExpectNoError(err)
		for _, pod := range pods.Items {
			_, found := nodeToAllocatableMap[pod.Spec.NodeName]
			if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
				framework.Logf("Pod %v requesting local ephemeral resource =%vm on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
				nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedStorageEphemeralStorage(pod)
			}
		}

		var podsNeededForSaturation int

		milliEphemeralStoragePerPod := nodeMaxAllocatable / maxNumberOfPods

		framework.Logf("Using pod capacity: %vm", milliEphemeralStoragePerPod)
		for name, leftAllocatable := range nodeToAllocatableMap {
			framework.Logf("Node: %v has local ephemeral resource allocatable: %vm", name, leftAllocatable)
			podsNeededForSaturation += (int)(leftAllocatable / milliEphemeralStoragePerPod)
		}

		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster local ephemeral resource and trying to start another one", podsNeededForSaturation))

		// As the pods are distributed randomly among nodes,
		// it can easily happen that all nodes are saturated
		// and there is no need to create additional pods.
		// StartPods requires at least one pod to replicate.
		if podsNeededForSaturation > 0 {
			framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "overcommit",
				*initPausePod(f, pausePodConfig{
					Name:   "",
					Labels: map[string]string{"name": ""},
					Resources: &v1.ResourceRequirements{
						Limits: v1.ResourceList{
							v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, "DecimalSI"),
						},
						Requests: v1.ResourceList{
							v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, "DecimalSI"),
						},
					},
				}), true, framework.Logf))
		}

		podName := "additional-pod"
		conf := pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "additional"},
			Resources: &v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, "DecimalSI"),
				},
			},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
		verifyResult(cs, podsNeededForSaturation, 1, ns)
	})

	// This test verifies we don't allow scheduling of pods in a way that the sum of
	// resource limits of the pods is greater than the machine's capacity.
	// It assumes that cluster add-on pods stay stable and cannot be run in parallel
	// with any other test that touches Nodes or Pods.
	// This is because we need precise control over what's running in the cluster.
	// Test scenario:
	// 1. Find the amount of allocatable CPU resources on each node.
	// 2. Create one pod with affinity to each node that uses 70% of the node CPU.
	// 3. Wait for the pods to be scheduled.
	// 4. Create another pod with no affinity to any node that needs 50% of the largest node CPU.
	// 5. Make sure this additional pod is not scheduled.
	/*
		Testname: scheduler-resource-limits
		Description: Ensure that scheduler accounts node resources correctly
		and respects pods' resource requirements during scheduling.
	*/
	framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
		framework.WaitForStableCluster(cs, masterNodes)
		nodeMaxAllocatable := int64(0)

		nodeToAllocatableMap := make(map[string]int64)
		for _, node := range nodeList.Items {
			nodeReady := false
			for _, condition := range node.Status.Conditions {
				if condition.Type == v1.NodeReady && condition.Status == v1.ConditionTrue {
					nodeReady = true
					break
				}
			}
			if !nodeReady {
				continue
			}
			// Apply node label to each node
			framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
			framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
			// Find allocatable amount of CPU.
			allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
			Expect(found).To(Equal(true))
			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
			if nodeMaxAllocatable < allocatable.MilliValue() {
				nodeMaxAllocatable = allocatable.MilliValue()
			}
		}
		// Clean up added labels after this test.
		defer func() {
			for nodeName := range nodeToAllocatableMap {
				framework.RemoveLabelOffNode(cs, nodeName, "node")
			}
		}()

		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
		framework.ExpectNoError(err)
		for _, pod := range pods.Items {
			_, found := nodeToAllocatableMap[pod.Spec.NodeName]
			if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
				framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
				nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
			}
		}

		By("Starting Pods to consume most of the cluster CPU.")
		// Create one pod per node that requires 70% of the node remaining CPU.
		fillerPods := []*v1.Pod{}
		for nodeName, cpu := range nodeToAllocatableMap {
			requestedCPU := cpu * 7 / 10
			fillerPods = append(fillerPods, createPausePod(f, pausePodConfig{
				Name: "filler-pod-" + string(uuid.NewUUID()),
				Resources: &v1.ResourceRequirements{
					Limits: v1.ResourceList{
						v1.ResourceCPU: *resource.NewMilliQuantity(requestedCPU, "DecimalSI"),
					},
					Requests: v1.ResourceList{
						v1.ResourceCPU: *resource.NewMilliQuantity(requestedCPU, "DecimalSI"),
					},
				},
				Affinity: &v1.Affinity{
					NodeAffinity: &v1.NodeAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
							NodeSelectorTerms: []v1.NodeSelectorTerm{
								{
									MatchExpressions: []v1.NodeSelectorRequirement{
										{
											Key:      "node",
											Operator: v1.NodeSelectorOpIn,
											Values:   []string{nodeName},
										},
									},
								},
							},
						},
					},
				},
			}))
		}

		// Wait for filler pods to schedule.
		for _, pod := range fillerPods {
			framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
		}

		By("Creating another pod that requires unavailable amount of CPU.")
		// Create another pod that requires 50% of the largest node CPU resources.
		// This pod should remain pending as at least 70% of CPU of other nodes in
		// the cluster are already consumed.
		podName := "additional-pod"
		conf := pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "additional"},
			Resources: &v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceCPU: *resource.NewMilliQuantity(nodeMaxAllocatable*5/10, "DecimalSI"),
				},
			},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
		verifyResult(cs, len(fillerPods), 1, ns)
	})

	// Test nodes do not carry any labels, hence it should be impossible to schedule a Pod
	// with a nonempty Selector set.
	/*
		Testname: scheduler-node-selector-not-matching
		Description: Ensure that scheduler respects the NodeSelector field of
		PodSpec during scheduling (when it does not match any node).
	*/
	framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() {
		By("Trying to schedule Pod with nonempty NodeSelector.")
		podName := "restricted-pod"

		framework.WaitForStableCluster(cs, masterNodes)

		conf := pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "restricted"},
			NodeSelector: map[string]string{
				"label": "nonempty",
			},
		}

		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
		verifyResult(cs, 0, 1, ns)
	})

	It("validates that a pod with an invalid NodeAffinity is rejected", func() {
		By("Trying to launch a pod with an invalid Affinity data.")
		podName := "without-label"
		_, err := cs.CoreV1().Pods(ns).Create(initPausePod(f, pausePodConfig{
			Name: podName,
			Affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{},
							},
						},
					},
				},
			},
		}))

		if err == nil || !errors.IsInvalid(err) {
			framework.Failf("Expect error of invalid, got : %v", err)
		}
	})

	/*
		Testname: scheduler-node-selector-matching
		Description: Ensure that scheduler respects the NodeSelector field
		of PodSpec during scheduling (when it matches).
	*/
	framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() {
		nodeName := GetNodeThatCanRunPod(f)

		By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "42"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		createPausePod(f, pausePodConfig{
			Name: labelPodName,
			NodeSelector: map[string]string{
				k: v,
			},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
		labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// Test nodes do not carry any labels, hence it should be impossible to schedule a Pod
	// with a non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
	It("validates that NodeAffinity is respected if not matching", func() {
		By("Trying to schedule Pod with nonempty NodeSelector.")
		podName := "restricted-pod"

		framework.WaitForStableCluster(cs, masterNodes)

		conf := pausePodConfig{
			Name: podName,
			Affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      "foo",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"bar", "value2"},
									},
								},
							}, {
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      "diffkey",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"wrong", "value2"},
									},
								},
							},
						},
					},
				},
			},
			Labels: map[string]string{"name": "restricted"},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
		verifyResult(cs, 0, 1, ns)
	})

	// Keep the same steps as the test on NodeSelector,
	// but specify Affinity in Pod.Spec.Affinity, instead of NodeSelector.
	It("validates that required NodeAffinity setting is respected if matching", func() {
		nodeName := GetNodeThatCanRunPod(f)

		By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "42"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		createPausePod(f, pausePodConfig{
			Name: labelPodName,
			Affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      k,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{v},
									},
								},
							},
						},
					},
				},
			},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
		labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// 1. Run a pod to get an available node, then delete the pod
	// 2. Taint the node with a random taint
	// 3. Relaunch the pod with tolerations that tolerate the taint on the node,
	//    and with a nodeSelector targeting the label applied to the node found in step 1
	It("validates that taints-tolerations is respected if matching", func() {
		nodeName := getNodeThatCanRunPodWithoutToleration(f)

		By("Trying to apply a random taint on the found node.")
		testTaint := v1.Taint{
			Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
			Value:  "testing-taint-value",
			Effect: v1.TaintEffectNoSchedule,
		}
		framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
		framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
		defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)

		By("Trying to apply a random label on the found node.")
		labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
		labelValue := "testing-label-value"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
		framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
		defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)

		By("Trying to relaunch the pod, now with tolerations.")
		tolerationPodName := "with-tolerations"
		createPausePod(f, pausePodConfig{
			Name:         tolerationPodName,
			Tolerations:  []v1.Toleration{{Key: testTaint.Key, Value: testTaint.Value, Effect: testTaint.Effect}},
			NodeSelector: map[string]string{labelKey: labelValue},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new taint yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName))
		deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		Expect(deployedPod.Spec.NodeName).To(Equal(nodeName))
	})

	// 1. Run a pod to get an available node, then delete the pod
	// 2. Taint the node with a random taint
	// 3. Relaunch the pod, still with no tolerations,
	//    and with a nodeSelector targeting the label applied to the node found in step 1
	It("validates that taints-tolerations is respected if not matching", func() {
		nodeName := getNodeThatCanRunPodWithoutToleration(f)

		By("Trying to apply a random taint on the found node.")
		testTaint := v1.Taint{
			Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
			Value:  "testing-taint-value",
			Effect: v1.TaintEffectNoSchedule,
		}
		framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
		framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
		defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)

		By("Trying to apply a random label on the found node.")
		labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
		labelValue := "testing-label-value"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
		framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
		defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)

		By("Trying to relaunch the pod, still no tolerations.")
		podNameNoTolerations := "still-no-tolerations"
		conf := pausePodConfig{
			Name:         podNameNoTolerations,
			NodeSelector: map[string]string{labelKey: labelValue},
		}

		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podNameNoTolerations, false)
		verifyResult(cs, 0, 1, ns)

		By("Removing taint off the node")
		WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), podNameNoTolerations, true)
		verifyResult(cs, 1, 0, ns)
	})

	It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() {
		nodeName := GetNodeThatCanRunPod(f)

		// use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify whether a conflict exists
		By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "90"

		nodeSelector := make(map[string]string)
		nodeSelector[k] = v

		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to create a pod(pod1) with hostport 80 and hostIP 127.0.0.1 and expect scheduled")
		creatHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", v1.ProtocolTCP, nodeSelector, true)

		By("Trying to create another pod(pod2) with hostport 80 but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled")
		creatHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", v1.ProtocolTCP, nodeSelector, true)

		By("Trying to create a third pod(pod3) with hostport 80, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides")
		creatHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", v1.ProtocolUDP, nodeSelector, true)
	})

	It("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
		nodeName := GetNodeThatCanRunPod(f)

		// use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify whether a conflict exists
		By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "95"

		nodeSelector := make(map[string]string)
		nodeSelector[k] = v

		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to create a pod(pod4) with hostport 80 and hostIP 0.0.0.0(empty string here) and expect scheduled")
		creatHostPortPodOnNode(f, "pod4", ns, "", v1.ProtocolTCP, nodeSelector, true)

		By("Trying to create another pod(pod5) with hostport 80 but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled")
		creatHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", v1.ProtocolTCP, nodeSelector, false)
	})
})
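
// initPausePod builds (but does not create) a pause-container pod spec from the
// given pausePodConfig, copying over labels, annotations, affinity, tolerations,
// ports and optional resource requirements.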
func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:            conf.Name,
			Labels:          conf.Labels,
			Annotations:     conf.Annotations,
			OwnerReferences: conf.OwnerReferences,
		},
		Spec: v1.PodSpec{
			NodeSelector: conf.NodeSelector,
			Affinity:     conf.Affinity,
			Containers: []v1.Container{
				{
					Name:  conf.Name,
					Image: framework.GetPauseImageName(f.ClientSet),
					Ports: conf.Ports,
				},
			},
			Tolerations:       conf.Tolerations,
			NodeName:          conf.NodeName,
			PriorityClassName: conf.PriorityClassName,
		},
	}
	if conf.Resources != nil {
		pod.Spec.Containers[0].Resources = *conf.Resources
	}
	return pod
}
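
// createPausePod submits the pause pod built from conf to the API server and
// returns the created pod object.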
func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
	framework.ExpectNoError(err)
	return pod
}
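
// runPausePod creates the pause pod described by conf, waits until it is running,
// and returns the refreshed pod object.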
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	pod := createPausePod(f, conf)
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return pod
}

func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
	// launch a pod to find a node which can launch a pod. We intentionally do
	// not just take the node list and choose the first of them. Depending on the
	// cluster and the scheduler it might be that a "normal" pod cannot be
	// scheduled onto it.
	pod := runPausePod(f, conf)

	By("Explicitly delete pod here to free the resource it takes.")
	err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)

	return pod.Spec.NodeName
}
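
// getRequestedCPU sums the CPU requests (in millicores) of all containers in the pod.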
func getRequestedCPU(pod v1.Pod) int64 {
	var result int64
	for _, container := range pod.Spec.Containers {
		result += container.Resources.Requests.Cpu().MilliValue()
	}
	return result
}
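
// getRequestedStorageEphemeralStorage sums the ephemeral-storage requests
// (in milli-units) of all containers in the pod.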
func getRequestedStorageEphemeralStorage(pod v1.Pod) int64 {
	var result int64
	for _, container := range pod.Spec.Containers {
		result += container.Resources.Requests.StorageEphemeral().MilliValue()
	}
	return result
}

// removeTaintFromNodeAction returns a closure that removes the given taint
// from the given node upon invocation.
func removeTaintFromNodeAction(cs clientset.Interface, nodeName string, testTaint v1.Taint) common.Action {
	return func() error {
		framework.RemoveTaintOffNode(cs, nodeName, testTaint)
		return nil
	}
}

// createPausePodAction returns a closure that creates a pause pod upon invocation.
func createPausePodAction(f *framework.Framework, conf pausePodConfig) common.Action {
	return func() error {
		_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
		return err
	}
}

// WaitForSchedulerAfterAction performs the provided action and then waits for
// scheduler to act on the given pod.
func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, podName string, expectSuccess bool) {
	predicate := scheduleFailureEvent(podName)
	if expectSuccess {
		predicate = scheduleSuccessEvent(podName, "" /* any node */)
	}
	success, err := common.ObserveEventAfterAction(f, predicate, action)
	Expect(err).NotTo(HaveOccurred())
	Expect(success).To(Equal(true))
}

// TODO: upgrade calls in PodAffinity tests when we're able to run them
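
// verifyResult checks that the numbers of scheduled and not-scheduled pods in the
// namespace match the expected counts.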
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	framework.ExpectNoError(err)
	scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		} else {
			return ""
		}
	}

	Expect(len(notScheduledPods)).To(Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	Expect(len(scheduledPods)).To(Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}

// verifyReplicasResult is a wrapper of verifyResult for a group of pods with the same
// "name: labelName" label, which means they belong to the same RC.
func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string, labelName string) {
	allPods := getPodsByLabels(c, ns, map[string]string{"name": labelName})
	scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		} else {
			return ""
		}
	}

	Expect(len(notScheduledPods)).To(Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	Expect(len(scheduledPods)).To(Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
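
// getPodsByLabels lists all pods in the namespace that match the given label set.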
func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
	selector := labels.SelectorFromSet(labels.Set(labelsMap))
	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
	framework.ExpectNoError(err)
	return allPods
}
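
// runAndKeepPodWithLabelAndGetNodeName runs a labeled pause pod, keeps it running,
// and returns the node it was scheduled to together with the pod name.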
func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, string) {
	// launch a pod to find a node which can launch a pod. We intentionally do
	// not just take the node list and choose the first of them. Depending on the
	// cluster and the scheduler it might be that a "normal" pod cannot be
	// scheduled onto it.
	By("Trying to launch a pod with a label to get a node which can launch it.")
	pod := runPausePod(f, pausePodConfig{
		Name:   "with-label-" + string(uuid.NewUUID()),
		Labels: map[string]string{"security": "S1"},
	})
	return pod.Spec.NodeName, pod.Name
}
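
// GetNodeThatCanRunPod returns the name of a node that can run a basic pause pod.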
func GetNodeThatCanRunPod(f *framework.Framework) string {
	By("Trying to launch a pod without a label to get a node which can launch it.")
	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
}
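
// getNodeThatCanRunPodWithoutToleration returns the name of a node that can run
// a pause pod that carries no tolerations.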
func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
	By("Trying to launch a pod without a toleration to get a node which can launch it.")
	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
}
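
// CreateHostPortPods runs an RC with the given number of replicas, each reserving
// host port 4321, and, when expectRunning is true, asserts that the RC comes up.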
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves host port"))
	config := &testutils.RCConfig{
		Client:         f.ClientSet,
		InternalClient: f.InternalClientset,
		Name:           id,
		Namespace:      f.Namespace.Name,
		Timeout:        defaultTimeout,
		Image:          framework.GetPauseImageName(f.ClientSet),
		Replicas:       replicas,
		HostPorts:      map[string]int{"port1": 4321},
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}

// creatHostPortPodOnNode creates a pod that uses the given host port on the node
// selected by nodeSelector and, when expectScheduled is true, waits for it to be scheduled.
func creatHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
	createPausePod(f, pausePodConfig{
		Name: podName,
		Ports: []v1.ContainerPort{
			{
				HostPort:      80,
				ContainerPort: 80,
				Protocol:      protocol,
				HostIP:        hostIP,
			},
		},
		NodeSelector: nodeSelector,
	})

	err := framework.WaitForPodNotPending(f.ClientSet, ns, podName)
	if expectScheduled {
		framework.ExpectNoError(err)
	}
}