mirror of https://github.com/k3s-io/k3s
Merge pull request #74125 from tuanvcw/fixing_typo
Fix many typos in both code and comments
commit 646145f578
@@ -114,7 +114,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
 // Create a replication controller for a service
 // that serves its hostname.
-// The source for the Docker containter kubernetes/serve_hostname is
+// The source for the Docker container kubernetes/serve_hostname is
 // in contrib/for-demos/serve_hostname
 By(fmt.Sprintf("Creating replication controller %s", name))
 newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
@@ -43,7 +43,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, pod name
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains a item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
 */
 framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
 podName := "downwardapi-volume-" + string(uuid.NewUUID())
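For readers skimming the diff, a minimal sketch of the kind of pod spec these downward API conformance tests exercise (the pod name, volume name, and mount path are illustrative, not taken from the test; field names follow the core v1 Go API, assuming v1 "k8s.io/api/core/v1" and metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" are imported):

// Illustrative only: a downward API volume exposing the pod name at /etc/podinfo/podname.
pod := &v1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "downwardapi-volume-example"},
	Spec: v1.PodSpec{
		Volumes: []v1.Volume{{
			Name: "podinfo",
			VolumeSource: v1.VolumeSource{
				DownwardAPI: &v1.DownwardAPIVolumeSource{
					Items: []v1.DownwardAPIVolumeFile{{
						Path:     "podname",
						FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
					}},
				},
			},
		}},
		Containers: []v1.Container{{
			Name:         "client-container",
			Image:        "busybox",
			Command:      []string{"cat", "/etc/podinfo/podname"},
			VolumeMounts: []v1.VolumeMount{{Name: "podinfo", MountPath: "/etc/podinfo"}},
		}},
	},
}

The label and annotation variants below use the same shape, with FieldPath set to "metadata.labels" or "metadata.annotations".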
@@ -73,7 +73,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, file mode 0400
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains a item for the Pod name with the file mode set to -r--------. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the Pod name with the file mode set to -r--------. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
 This test is marked LinuxOnly since Windows does not support setting specific file permissions.
 */
 framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func() {
@@ -118,7 +118,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, update label
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains list of items for each of the Pod labels. The container runtime MUST be able to access Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod labels. The container runtime MUST be able to access Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume.
 */
 framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
 labels := map[string]string{}
@@ -150,7 +150,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, update annotations
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains list of items for each of the Pod annotations. The container runtime MUST be able to access Pod annotations from the specified path on the mounted volume. Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod annotations. The container runtime MUST be able to access Pod annotations from the specified path on the mounted volume. Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume.
 */
 framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
 annotations := map[string]string{}
@@ -184,7 +184,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, CPU limits
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains a item for the CPU limits. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume.
 */
 framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
 podName := "downwardapi-volume-" + string(uuid.NewUUID())
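The CPU and memory limit/request variants below swap FieldRef for ResourceFieldRef; a hedged fragment (container name, path, and divisor are illustrative):

// Illustrative fragment, assuming v1 "k8s.io/api/core/v1" and
// resource "k8s.io/apimachinery/pkg/api/resource" are imported.
// Exposes the named container's CPU limit at <mountPath>/cpu_limit in millicores.
item := v1.DownwardAPIVolumeFile{
	Path: "cpu_limit",
	ResourceFieldRef: &v1.ResourceFieldSelector{
		ContainerName: "client-container", // must name a container in the same pod
		Resource:      "limits.cpu",
		Divisor:       resource.MustParse("1m"),
	},
}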
@@ -198,7 +198,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, memory limits
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains a item for the memory limits. The container runtime MUST be able to access memory limits from the specified path on the mounted volume.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. The container runtime MUST be able to access memory limits from the specified path on the mounted volume.
 */
 framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
 podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -212,7 +212,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, CPU request
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains a item for the CPU request. The container runtime MUST be able to access CPU request from the specified path on the mounted volume.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU request. The container runtime MUST be able to access CPU request from the specified path on the mounted volume.
 */
 framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
 podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -226,7 +226,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, memory request
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains a item for the memory request. The container runtime MUST be able to access memory request from the specified path on the mounted volume.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory request. The container runtime MUST be able to access memory request from the specified path on the mounted volume.
 */
 framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
 podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -240,7 +240,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, CPU limit, default node allocatable
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains a item for the CPU limits. CPU limits is not specified for the container. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume and the value MUST be default node allocatable.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. CPU limits is not specified for the container. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume and the value MUST be default node allocatable.
 */
 framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
 podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -252,7 +252,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
 /*
 Release : v1.9
 Testname: DownwardAPI volume, memory limit, default node allocatable
-Description: A Pod is configured with DownwardAPIVolumeSource and DownwartAPIVolumeFiles contains a item for the memory limits. memory limits is not specified for the container. The container runtime MUST be able to access memory limits from the specified path on the mounted volume and the value MUST be default node allocatable.
+Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. memory limits is not specified for the container. The container runtime MUST be able to access memory limits from the specified path on the mounted volume and the value MUST be default node allocatable.
 */
 framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
 podName := "downwardapi-volume-" + string(uuid.NewUUID())
@@ -404,7 +404,7 @@ var _ = Describe("[sig-storage] Projected secret", func() {
 })

 //The secret is in pending during volume creation until the secret objects are available
-//or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timout exception unless it is marked optional.
+//or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timeout exception unless it is marked optional.
 //Slow (~5 mins)
 It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
 volumeMountPath := "/etc/projected-secret-volumes"
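The optional/non-optional behaviour referenced in the comment maps to the Optional flag on the secret projection; a hedged fragment (the volume and secret names are illustrative):

// Illustrative fragment, assuming v1 "k8s.io/api/core/v1" is imported.
// With Optional left false and no Secret named "missing-secret" in the
// namespace, the kubelet keeps the volume pending until the mount times out,
// which is the failure the test above waits for.
optional := false
vol := v1.Volume{
	Name: "projected-secret",
	VolumeSource: v1.VolumeSource{
		Projected: &v1.ProjectedVolumeSource{
			Sources: []v1.VolumeProjection{{
				Secret: &v1.SecretProjection{
					LocalObjectReference: v1.LocalObjectReference{Name: "missing-secret"},
					Optional:             &optional,
				},
			}},
		},
	},
}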
@@ -716,7 +716,7 @@ type PodStateVerification struct {

 // Optional: only pods passing this function will pass the filter
 // Verify a pod.
-// As an optimization, in addition to specfying filter (boolean),
+// As an optimization, in addition to specifying filter (boolean),
 // this function allows specifying an error as well.
 // The error indicates that the polling of the pod spectrum should stop.
 Verify func(v1.Pod) (bool, error)
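The Verify field above takes a pod and returns (match bool, err error); a hedged example of how a caller might use the error to abort polling (the condition chosen here is illustrative, not from the framework):

// Illustrative Verify implementation, assuming v1 "k8s.io/api/core/v1" and "fmt"
// are imported: pass only running pods, and return an error (which stops the
// polling described above) as soon as any pod has already failed.
verify := func(p v1.Pod) (bool, error) {
	if p.Status.Phase == v1.PodFailed {
		return false, fmt.Errorf("pod %s failed, no point in polling further", p.Name)
	}
	return p.Status.Phase == v1.PodRunning, nil
}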
@@ -856,7 +856,7 @@ func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration)
 }
 }

-// ForEach runs a function against every verifiable pod. Be warned that this doesn't wait for "n" pods to verifiy,
+// ForEach runs a function against every verifiable pod. Be warned that this doesn't wait for "n" pods to verify,
 // so it may return very quickly if you have strict pod state requirements.
 //
 // For example, if you require at least 5 pods to be running before your test will pass,
@@ -718,7 +718,7 @@ func (j *TestJig) VerifyURL(route, host string, iterations int, interval time.Du
 framework.Logf(b)
 return err
 }
-j.Logger.Infof("Verfied %v with host %v %d times, sleeping for %v", route, host, i, interval)
+j.Logger.Infof("Verified %v with host %v %d times, sleeping for %v", route, host, i, interval)
 time.Sleep(interval)
 }
 return nil
@@ -182,7 +182,7 @@ func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, paralle
 })
 }

-// WaitForJobComplete uses c to wait for compeletions to complete for the Job jobName in namespace ns.
+// WaitForJobComplete uses c to wait for completions to complete for the Job jobName in namespace ns.
 func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error {
 return wait.Poll(Poll, JobTimeout, func() (bool, error) {
 curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
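The hunk cuts off before the poll condition; as a rough, hypothetical sketch of the pattern (not the actual implementation; Poll and JobTimeout are the framework constants referenced above, and the completion check is an assumption):

// Hypothetical sketch only. Assumes "k8s.io/apimachinery/pkg/util/wait",
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", and
// clientset "k8s.io/client-go/kubernetes" are imported.
func waitForJobCompletions(c clientset.Interface, ns, jobName string, completions int32) error {
	return wait.Poll(Poll, JobTimeout, func() (bool, error) {
		curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Assumed condition: done once the number of succeeded pods reaches the
		// requested completion count.
		return curr.Status.Succeeded >= completions, nil
	})
}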
@@ -45,7 +45,7 @@ func RcByNamePort(name string, replicas int32, image string, port int, protocol
 }, gracePeriod)
 }

-// RcByNameContainer returns a ReplicationControoler with specified name and container
+// RcByNameContainer returns a ReplicationController with specified name and container
 func RcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
 gracePeriod *int64) *v1.ReplicationController {
@@ -146,7 +146,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 // TODO: uncomment the restart test once we have a way to synchronize
 // and know that the controller has resumed watching. If we delete
 // the ingress before the controller is ready we will leak.
-// By("restaring glbc")
+// By("restarting glbc")
 // restarter := NewRestartConfig(
 // framework.GetMasterHost(), "glbc", glbcHealthzPort, restartPollInterval, restartTimeout)
 // restarter.restart()
@@ -44,7 +44,7 @@ var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {

 scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig)
 if err := scaleFramework.PrepareScaleTest(); err != nil {
-framework.Failf("Unexpected error while preraring ingress scale test: %v", err)
+framework.Failf("Unexpected error while preparing ingress scale test: %v", err)
 }
 })
@@ -49,7 +49,7 @@ var _ = SIGDescribe("Service endpoints latency", func() {
 /*
 Release : v1.9
 Testname: Service endpoint latency, thresholds
-Description: Run 100 iterations of create service with the Pod running the pause image, measure the time it takes for creating the service and the endpoint with the service name is available. These durations are captured for 100 iterations, then the durations are sorted to compue 50th, 90th and 99th percentile. The single server latency MUST not exceed liberally set thresholds of 20s for 50th percentile and 50s for the 90th percentile.
+Description: Run 100 iterations of create service with the Pod running the pause image, measure the time it takes for creating the service and the endpoint with the service name is available. These durations are captured for 100 iterations, then the durations are sorted to compute 50th, 90th and 99th percentile. The single server latency MUST not exceed liberally set thresholds of 20s for 50th percentile and 50s for the 90th percentile.
 */
 framework.ConformanceIt("should not be very high ", func() {
 const (
@@ -25,7 +25,7 @@ package perftype
 // DataItem is the data point.
 type DataItem struct {
 // Data is a map from bucket to real data point (e.g. "Perc90" -> 23.5). Notice
-// that all data items with the same label conbination should have the same buckets.
+// that all data items with the same label combination should have the same buckets.
 Data map[string]float64 `json:"data"`
 // Unit is the data unit. Notice that all data items with the same label combination
 // should have the same unit.
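As a concrete illustration of a bucketed data point (the values are invented, and the Labels field name is an assumption based on the "label combination" comments above):

// Assumed shape, for illustration only.
item := perftype.DataItem{
	Data: map[string]float64{"Perc50": 10.1, "Perc90": 23.5, "Perc99": 41.0},
	Unit: "ms",
	// Labels is assumed from the comments above about "label combination".
	Labels: map[string]string{"Metric": "pod_startup"},
}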
@@ -228,7 +228,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 return err
 }, ns, labelRCName, false)

-// these two replicas should all be rejected since podAntiAffinity says it they anit-affinity with pod {"service": "S1"}
+// these two replicas should all be rejected since podAntiAffinity says it they anti-affinity with pod {"service": "S1"}
 verifyReplicasResult(cs, 0, replica, ns, labelRCName)
 })
 })
@@ -570,13 +570,13 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {

 port := int32(54321)
 By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port))
-creatHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true)
+createHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true)

 By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port))
-creatHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true)
+createHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true)

 By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port))
-creatHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true)
+createHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true)
 })

 It("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
@@ -596,10 +596,10 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {

 port := int32(54322)
 By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
-creatHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true)
+createHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true)

 By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
-creatHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
+createHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
 })
 })
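What the helper sets on the pause pod is, in essence, a single container port; a hedged fragment (the container port number is illustrative):

// Illustrative fragment, assuming v1 "k8s.io/api/core/v1" is imported.
// The scheduler treats two pods as conflicting only when hostPort, protocol,
// and hostIP all overlap; hostIP 0.0.0.0 (the empty string above) overlaps
// with every address, which is why pod5 cannot land next to pod4.
port := v1.ContainerPort{
	HostPort:      54322,
	ContainerPort: 80,
	Protocol:      v1.ProtocolTCP,
	HostIP:        "127.0.0.1",
}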
@@ -803,7 +803,7 @@ func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectR
 }

 // create pod which using hostport on the specified node according to the nodeSelector
-func creatHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
+func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
 createPausePod(f, pausePodConfig{
 Name: podName,
 Ports: []v1.ContainerPort{
@@ -67,7 +67,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
 // 1. node lifecycle manager generate a status change: [NodeReady=true, status=ConditionUnknown]
 // 1. it's applied with node.kubernetes.io/unreachable=:NoExecute taint
 // 2. pods without toleration are applied with toleration with tolerationSeconds=300
-// 3. pods with toleration and without tolerationSeconds won't be modifed, and won't be evicted
+// 3. pods with toleration and without tolerationSeconds won't be modified, and won't be evicted
 // 4. pods with toleration and with tolerationSeconds won't be modified, and will be evicted after tolerationSeconds
 // When network issue recovers, it's expected to see:
 // 5. node lifecycle manager generate a status change: [NodeReady=true, status=ConditionTrue]
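Point 2 above describes the toleration that gets added automatically; a sketch of what such a toleration looks like on the pod spec (the key and the 300s value come from the comments above, the rest follows the standard core v1 type):

// Illustrative fragment, assuming v1 "k8s.io/api/core/v1" is imported.
// A pod without its own toleration is given roughly this one, so it is
// evicted 300s after the node is tainted unreachable.
seconds := int64(300)
toleration := v1.Toleration{
	Key:               "node.kubernetes.io/unreachable",
	Operator:          v1.TolerationOpExists,
	Effect:            v1.TaintEffectNoExecute,
	TolerationSeconds: &seconds,
}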
@@ -144,6 +144,6 @@ func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeIn
 framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
 }
 if pv_ssd != nil {
-framework.ExpectNoError(framework.DeletePersistentVolume(c, pv_ssd.Name), "Faled to delete PV ", pv_ssd.Name)
+framework.ExpectNoError(framework.DeletePersistentVolume(c, pv_ssd.Name), "Failed to delete PV ", pv_ssd.Name)
 }
 }
@@ -127,7 +127,7 @@ func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, fol
 return vmFolder.Reference(), nil
 }

-// CreateVolume creates a vsphere volume using given volume paramemters specified in VolumeOptions.
+// CreateVolume creates a vsphere volume using given volume parameters specified in VolumeOptions.
 // If volume is created successfully the canonical disk path is returned else error is returned.
 func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef types.ManagedObjectReference) (string, error) {
 ctx, cancel := context.WithCancel(context.Background())
@@ -34,7 +34,7 @@ import (
 Perform vsphere volume life cycle management at scale based on user configurable value for number of volumes.
 The following actions will be performed as part of this test.

-1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capalibilies.)
+1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.)
 2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from System Environment.
 3. Launch VCP_SCALE_INSTANCES goroutine for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for create/attach of VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes.
 4. Read VCP_SCALE_VOLUMES_PER_POD from System Environment. Each pod will be have VCP_SCALE_VOLUMES_PER_POD attached to it.
@@ -34,7 +34,7 @@ import (
 Induce stress to create volumes in parallel with multiple threads based on user configurable values for number of threads and iterations per thread.
 The following actions will be performed as part of this test.

-1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capalibilies.)
+1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.)
 2. READ VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME and VSPHERE_DATASTORE from System Environment.
 3. Launch goroutine for volume lifecycle operations.
 4. Each instance of routine iterates for n times, where n is read from system env - VCP_STRESS_ITERATIONS
@@ -151,7 +151,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 invokeValidPolicyTest(f, client, namespace, scParameters)
 })

-// Invalid VSAN storage capabilties parameters.
+// Invalid VSAN storage capabilities parameters.
 It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
 By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
 scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
@@ -113,7 +113,7 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
 }

 // Test waits for the upgrade to complete, and then verifies
-// with a connectvity check to the loadbalancer ip.
+// with a connectivity check to the loadbalancer ip.
 func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
 switch upgrade {
 case MasterUpgrade, ClusterUpgrade:
@@ -49,7 +49,7 @@ const (
 var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 const (
 // The data collection time of resource collector and the standalone cadvisor
-// is not synchronizated, so resource collector may miss data or
+// is not synchronized, so resource collector may miss data or
 // collect duplicated data
 containerStatsPollingPeriod = 500 * time.Millisecond
 )
@@ -89,7 +89,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 By("Restarting Kubelet and waiting for the current running pod to restart")
 restartKubelet()

-By("Confirming that after a kubelet and pod restart, GPU assignement is kept")
+By("Confirming that after a kubelet and pod restart, GPU assignment is kept")
 ensurePodContainerRestart(f, p1.Name, p1.Name)
 devIdRestart1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
 Expect(devIdRestart1).To(Equal(devId1))
@@ -75,7 +75,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {

 By("wait for the mirror pod to be updated")
 Eventually(func() error {
-return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid)
+return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
 }, 2*time.Minute, time.Second*4).Should(BeNil())

 By("check the mirror pod container image is updated")
@@ -101,7 +101,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {

 By("wait for the mirror pod to be recreated")
 Eventually(func() error {
-return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid)
+return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
 }, 2*time.Minute, time.Second*4).Should(BeNil())
 })
 /*
@@ -121,7 +121,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {

 By("wait for the mirror pod to be recreated")
 Eventually(func() error {
-return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid)
+return checkMirrorPodRecreatedAndRunning(f.ClientSet, mirrorPodName, ns, uid)
 }, 2*time.Minute, time.Second*4).Should(BeNil())
 })
 AfterEach(func() {
@@ -191,7 +191,7 @@ func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error
 return nil
 }

-func checkMirrorPodRecreatedAndRunnig(cl clientset.Interface, name, namespace string, oUID types.UID) error {
+func checkMirrorPodRecreatedAndRunning(cl clientset.Interface, name, namespace string, oUID types.UID) error {
 pod, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
 if err != nil {
 return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
@@ -79,7 +79,7 @@ func expectFileValToEqual(filePath string, expectedValue, delta int64) error {
 return fmt.Errorf("failed to parse output %v", err)
 }

-// Ensure that values are within a delta range to work arounding rounding errors.
+// Ensure that values are within a delta range to work around rounding errors.
 if (actual < (expectedValue - delta)) || (actual > (expectedValue + delta)) {
 return fmt.Errorf("Expected value at %q to be between %d and %d. Got %d", filePath, (expectedValue - delta), (expectedValue + delta), actual)
 }
@@ -32,7 +32,7 @@ import (
 // from NAS parallel benchmark (NPB) suite.
 type npbEPWorkload struct{}

-// Ensure npbEPWorkload implemets NodePerfWorkload interface.
+// Ensure npbEPWorkload implements NodePerfWorkload interface.
 var _ NodePerfWorkload = &npbEPWorkload{}

 func (w npbEPWorkload) Name() string {
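The var _ NodePerfWorkload = &npbEPWorkload{} line above is the standard compile-time interface check; the same idiom with standard-library types:

// If *bytes.Buffer ever stopped satisfying io.Writer, this line would fail to
// compile; it costs nothing at runtime. Assumes "bytes" and "io" are imported.
var _ io.Writer = (*bytes.Buffer)(nil)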
@@ -69,7 +69,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image
 // Create the temp staging directory
 klog.V(2).Infof("Staging test binaries on %q", host)
 workspace := newWorkspaceDir()
-// Do not sudo here, so that we can use scp to copy test archive to the directdory.
+// Do not sudo here, so that we can use scp to copy test archive to the directory.
 if output, err := SSHNoSudo(host, "mkdir", workspace); err != nil {
 // Exit failure with the error
 return "", false, fmt.Errorf("failed to create workspace directory %q on host %q: %v output: %q", workspace, host, err, output)
@@ -105,7 +105,7 @@ func (o *Observer) StartObserving() error {
 func (o *Observer) Results(name string, config *Config) *Results {
 var (
 firstAdd time.Time // earliest time any node was added (first node add)
-lastAssignment time.Time // latest time any node was assignged CIDR (last node assignment)
+lastAssignment time.Time // latest time any node was assigned CIDR (last node assignment)
 )
 o.wg.Wait()
 close(o.stopChan) // shutdown the shared informer
@@ -223,7 +223,7 @@ func testAudit(t *testing.T, version string) {
 }
 }

-// configMapOperations is a set of known operations perfomed on the configmap type
+// configMapOperations is a set of known operations performed on the configmap type
 // which correspond to the expected events.
 // This is shared by the dynamic test
 func configMapOperations(t *testing.T, kubeclient kubernetes.Interface) {
@@ -898,7 +898,7 @@ func TestNodePIDPressure(t *testing.T) {
 t.Fatalf("Cannot update node: %v", err)
 }

-// Creats test pod.
+// Create test pod.
 testPod := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{Name: "pidpressure-fake-name"},
 Spec: v1.PodSpec{