Cleanup: replace some hardcoded values and remove unused functions

pull/6/head
NickrenREN 2017-04-24 15:08:39 +08:00
parent 35159f9c45
commit d4376599ba
22 changed files with 37 additions and 114 deletions
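Every hunk in this commit applies the same pattern: the serve_hostname image string is defined once as framework.ServeHostnameImage (added to test/e2e/framework in the util.go hunk further down) and each test references the constant instead of repeating the literal. A minimal sketch of a converted call site, assuming the vendored v1 API import path this tree uses:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// Illustrative only: a container spec referencing the shared constant
// rather than the raw "gcr.io/google_containers/serve_hostname:v1.4" string.
var srv = v1.Container{
	Name:  "srv",
	Image: framework.ServeHostnameImage,
	Ports: []v1.ContainerPort{{ContainerPort: 9376}},
}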


@@ -368,7 +368,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "srv",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Image: framework.ServeHostnameImage,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},


@@ -94,8 +94,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
f = framework.NewDefaultFramework("daemonsets")
image := "gcr.io/google_containers/serve_hostname:v1.4"
redisImage := "gcr.io/k8s-testimages/redis:e2e"
image := framework.ServeHostnameImage
dsName := "daemon-set"
var ns string


@@ -73,7 +73,7 @@ func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
checkExistingRCRecovers(f)
testReplicationControllerServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
testReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
}
// For this duration, etcd will be failed by executing a failCommand on the master.


@@ -55,7 +55,7 @@ var _ = framework.KubeDescribe("Events", func() {
Containers: []v1.Container{
{
Name: "p",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Image: framework.ServeHostnameImage,
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},


@@ -497,17 +497,6 @@ func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLate
float64(nodeCount)/(latencyDataLag[len(latencyDataLag)-1].Latency.Minutes()))
}
- // testMaximumLatencyValue verifies the highest latency value is less than or equal to
- // the given time.Duration. Since the arrays are sorted we are looking at the last
- // element which will always be the highest. If the latency is higher than the max Failf
- // is called.
- func testMaximumLatencyValue(latencies []PodLatencyData, max time.Duration, name string) {
- highestLatency := latencies[len(latencies)-1]
- if !(highestLatency.Latency <= max) {
- Failf("%s were not all under %s: %#v", name, max.String(), latencies)
- }
- }
func PrintLatencies(latencies []PodLatencyData, header string) {
metrics := ExtractLatencyMetrics(latencies)
Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])


@@ -133,29 +133,6 @@ func nodeUpgradeGCE(rawV, img string) error {
return err
}
- func cleanupNodeUpgradeGCE(tmplBefore string) {
- Logf("Cleaning up any unused node templates")
- tmplAfter, err := MigTemplate()
- if err != nil {
- Logf("Could not get node template post-upgrade; may have leaked template %s", tmplBefore)
- return
- }
- if tmplBefore == tmplAfter {
- // The node upgrade failed so there's no need to delete
- // anything.
- Logf("Node template %s is still in use; not cleaning up", tmplBefore)
- return
- }
- Logf("Deleting node template %s", tmplBefore)
- if _, _, err := retryCmd("gcloud", "compute", "instance-templates",
- fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
- "delete",
- tmplBefore); err != nil {
- Logf("gcloud compute instance-templates delete %s call failed with err: %v", tmplBefore, err)
- Logf("May have leaked instance template %q", tmplBefore)
- }
- }
func nodeUpgradeGKE(v string, img string) error {
Logf("Upgrading nodes to version %q and image %q", v, img)
args := []string{


@@ -1057,7 +1057,7 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli
config := testutils.RCConfig{
Client: c,
InternalClient: internalClient,
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Image: ServeHostnameImage,
Name: name,
Namespace: ns,
PollInterval: 3 * time.Second,


@@ -29,13 +29,8 @@ import (
)
const (
- serveHostnameImage = "gcr.io/google_containers/serve_hostname:v1.4"
resizeNodeReadyTimeout = 2 * time.Minute
resizeNodeNotReadyTimeout = 2 * time.Minute
nodeReadinessTimeout = 3 * time.Minute
podNotReadyTimeout = 1 * time.Minute
podReadyTimeout = 2 * time.Minute
testPort = 9376
)
func ResizeGroup(group string, size int32) error {


@@ -238,16 +238,6 @@ func RegisterNodeFlags() {
flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")
}
- // overwriteFlagsWithViperConfig finds and writes values to flags using viper as input.
- func overwriteFlagsWithViperConfig() {
- viperFlagSetter := func(f *flag.Flag) {
- if viper.IsSet(f.Name) {
- f.Value.Set(viper.GetString(f.Name))
- }
- }
- flag.VisitAll(viperFlagSetter)
- }
// ViperizeFlags sets up all flag and config processing. Future configuration info should be added to viper, not to flags.
func ViperizeFlags() {


@@ -176,6 +176,9 @@ const (
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
+ // Serve hostname image name
+ ServeHostnameImage = "gcr.io/google_containers/serve_hostname:v1.4"
)
var (
@@ -407,9 +410,6 @@ func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr schema.GroupVersio
// ProvidersWithSSH are those providers where each node is accessible with SSH
var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
- // providersWithMasterSSH are those providers where master node is accessible with SSH
- var providersWithMasterSSH = []string{"gce", "gke", "kubemark", "aws", "local"}
type podCondition func(pod *v1.Pod) (bool, error)
// logPodStates logs basic info of provided pods for debugging.
@@ -1377,30 +1377,6 @@ func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, nam
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}
- // waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node.
- // In case of failure or too long waiting time, an error is returned.
- func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*v1.Pod, error) {
- label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
- var p *v1.Pod = nil
- err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
- Logf("Waiting for pod %s to appear on node %s", rcName, node)
- options := metav1.ListOptions{LabelSelector: label.String()}
- pods, err := c.Core().Pods(ns).List(options)
- if err != nil {
- return false, err
- }
- for _, pod := range pods.Items {
- if pod.Spec.NodeName == node {
- Logf("Pod %s found on node %s", pod.Name, node)
- p = &pod
- return true, nil
- }
- }
- return false, nil
- })
- return p, err
- }
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error {
options := metav1.ListOptions{FieldSelector: fields.Set{


@@ -85,7 +85,7 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod {
}
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
- pod, err := c.Core().Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
+ pod, err := c.Core().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage))
if err == nil {
framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
} else {


@@ -48,15 +48,12 @@ import (
)
const (
- smallGroupSize = 5
- mediumGroupSize = 30
- bigGroupSize = 250
- smallGroupName = "load-small"
- mediumGroupName = "load-medium"
- bigGroupName = "load-big"
- smallGroupBatchSize = 30
- mediumGroupBatchSize = 5
- bigGroupBatchSize = 1
+ smallGroupSize = 5
+ mediumGroupSize = 30
+ bigGroupSize = 250
+ smallGroupName = "load-small"
+ mediumGroupName = "load-medium"
+ bigGroupName = "load-big"
// We start RCs/Services/pods/... in different namespace in this test.
// nodeCountPerNamespace determines how many namespaces we will be using
// depending on the number of nodes in the underlying cluster.
@@ -143,16 +140,16 @@ var _ = framework.KubeDescribe("Load capacity", func() {
loadTests := []Load{
// The container will consume 1 cpu and 512mb of memory.
{podsPerNode: 3, image: "jess/stress", command: []string{"stress", "-c", "1", "-m", "2"}, kind: api.Kind("ReplicationController")},
{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: api.Kind("ReplicationController")},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: api.Kind("ReplicationController")},
// Tests for other resource types
{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: extensions.Kind("Deployment")},
{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: batch.Kind("Job")},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment")},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: batch.Kind("Job")},
// Test scheduling when daemons are preset
{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
// Test with secrets
{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: extensions.Kind("Deployment"), secretsPerPod: 2},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
// Special test case which randomizes created resources
{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: randomKind},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: randomKind},
}
for _, testArg := range loadTests {
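The loadTests entries above are composite literals of a Load struct defined outside this hunk. A hypothetical reconstruction from the field names alone (types inferred, not taken from the actual source):

package example

import "k8s.io/apimachinery/pkg/runtime/schema"

// Hypothetical: field names mirror the literals above; api.Kind, extensions.Kind
// and batch.Kind each return a schema.GroupKind, hence the kind field's type.
type Load struct {
	podsPerNode    int
	image          string
	command        []string
	kind           schema.GroupKind
	daemonsPerNode int
	secretsPerPod  int
}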


@@ -38,7 +38,7 @@ var _ = framework.KubeDescribe("ReplicationController", func() {
f := framework.NewDefaultFramework("replication-controller")
It("should serve a basic image on each replica with a public image [Conformance]", func() {
- testReplicationControllerServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
+ testReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})
It("should serve a basic image on each replica with a private image", func() {


@@ -78,7 +78,7 @@ var _ = framework.KubeDescribe("ReplicaSet", func() {
f := framework.NewDefaultFramework("replicaset")
It("should serve a basic image on each replica with a public image [Conformance]", func() {
- testReplicaSetServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
+ testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})
It("should serve a basic image on each replica with a private image", func() {


@@ -32,13 +32,11 @@ import (
)
const (
- serveHostnameImage = "gcr.io/google_containers/serve_hostname:v1.4"
- resizeNodeReadyTimeout = 2 * time.Minute
- resizeNodeNotReadyTimeout = 2 * time.Minute
- nodeReadinessTimeout = 3 * time.Minute
- podNotReadyTimeout = 1 * time.Minute
- podReadyTimeout = 2 * time.Minute
- testPort = 9376
+ resizeNodeReadyTimeout = 2 * time.Minute
+ nodeReadinessTimeout = 3 * time.Minute
+ podNotReadyTimeout = 1 * time.Minute
+ podReadyTimeout = 2 * time.Minute
+ testPort = 9376
)
func svcByName(name string, port int) *v1.Service {
@@ -68,7 +66,7 @@ func newSVCByName(c clientset.Interface, ns, name string) error {
func newRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) {
By(fmt.Sprintf("creating replication controller %s", name))
return c.Core().ReplicationControllers(ns).Create(framework.RcByNamePort(
- name, replicas, serveHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
+ name, replicas, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
}
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {


@@ -37,7 +37,7 @@ var _ = framework.KubeDescribe("Multi-AZ Clusters", func() {
f := framework.NewDefaultFramework("multi-az")
var zoneCount int
var err error
image := "gcr.io/google_containers/serve_hostname:v1.4"
image := framework.ServeHostnameImage
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
if zoneCount <= 0 {


@@ -42,7 +42,7 @@ func (DaemonSetUpgradeTest) Name() string { return "daemonset-upgrade" }
func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
daemonSetName := "ds1"
labelSet := map[string]string{"ds-name": daemonSetName}
image := "gcr.io/google_containers/serve_hostname:v1.4"
image := framework.ServeHostnameImage
ns := f.Namespace


@@ -173,7 +173,7 @@ func createDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string)
Containers: []v1.Container{
{
Name: "container1",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Image: framework.ServeHostnameImage,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},


@@ -110,7 +110,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Image: framework.ServeHostnameImage,
Name: podName,
},
},


@@ -21,6 +21,7 @@ go_library(
deps = [
"//pkg/api:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//test/e2e/framework:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",


@@ -36,6 +36,7 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework"
)
var (
@@ -184,7 +185,7 @@ func main() {
Containers: []api.Container{
{
Name: "serve-hostname",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Image: framework.ServeHostnameImage,
Ports: []api.ContainerPort{{ContainerPort: 9376}},
},
},


@@ -203,7 +203,7 @@ func main() {
Containers: []v1.Container{
{
Name: "serve-hostname",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Image: e2e.ServeHostnameImage,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
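The final hunk refers to the constant as e2e.ServeHostnameImage rather than framework.ServeHostnameImage; that file's import block is not shown, but it presumably aliases the framework package, roughly:

import (
	// assumed alias, matching the e2e.ServeHostnameImage reference above
	e2e "k8s.io/kubernetes/test/e2e/framework"
)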