mirror of https://github.com/k3s-io/k3s
Node e2e test fixes:
- Improve documentation and method naming
- Fix command that is run remotely
- Never reschedule the busybox logging test pod since it is supposed to terminate
- Update log test retry logic to correctly retry instead of failing the test
- localhost -> 127.0.0.1 to work on coreos
- give name to etcd to work on coreos
- allow using full hostname for nodename for coreos

pull/6/head
parent 229f40e69f
commit ad37e2654e
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
+// To run tests in this suite
+// `$ ginkgo -- --node-name node-e2e-test-1 --api-server-address <serveraddress> --logtostderr`
 package e2e_node
 
 import (
@@ -34,10 +34,10 @@ import (
 
 var freePortRegexp = regexp.MustCompile(".+:([0-9]+)")
 
-type TearDown func()
+type TearDown func() *RunResult
 
 type GCloudClient interface {
-	CopyAndWaitTillHealthy(
+	RunAndWaitTillHealthy(
 		sudo bool, copyBin bool, remotePort string,
 		timeout time.Duration, healthUrl string, bin string, args ...string) (*CmdHandle, error)
 }
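For orientation, a minimal caller-side sketch of the renamed interface; the function name startEtcd and the concrete argument values are mine, taken loosely from the runner changes further down, and error handling is abbreviated:

// startEtcd starts etcd on the remote host through the GCloudClient and waits
// for its health endpoint, returning the handle so the caller can tear it down.
func startEtcd(c GCloudClient) (*CmdHandle, error) {
	eh, err := c.RunAndWaitTillHealthy(
		false, false, "4001", time.Minute, "v2/keys/", "etcd", "--data-dir", "./")
	if err != nil {
		return nil, fmt.Errorf("etcd never became healthy: %v", err)
	}
	return eh, nil
}

The caller later invokes eh.TearDown(); with the new signature that call hands back a *RunResult describing any cleanup failure, rather than pushing it onto the handle's output channel.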
@@ -102,7 +102,7 @@ func (gc *gCloudClientImpl) CopyToHost(from string, to string) ([]byte, error) {
 	return exec.Command("gcloud", args...).CombinedOutput()
 }
 
-func (gc *gCloudClientImpl) CopyAndRun(
+func (gc *gCloudClientImpl) Run(
 	sudo bool, copyBin bool, remotePort string, bin string, args ...string) *CmdHandle {
 
 	h := &CmdHandle{}
@@ -119,54 +119,57 @@ func (gc *gCloudClientImpl) CopyAndRun(
 	}
 	h.LPort = getLocalPort()
 
-	h.TearDown = func() {
+	h.TearDown = func() *RunResult {
 		out, err := gc.Command("sudo", "pkill", f)
 		if err != nil {
-			h.Output <- RunResult{out, err, fmt.Sprintf("pkill %s", cmd)}
-			return
+			return &RunResult{out, err, fmt.Sprintf("pkill %s", f)}
 		}
 		out, err = gc.Command("rm", "-rf", tDir)
 		if err != nil {
-			h.Output <- RunResult{out, err, fmt.Sprintf("rm -rf %s", tDir)}
-			return
+			return &RunResult{out, err, fmt.Sprintf("rm -rf %s", tDir)}
 		}
+		return &RunResult{}
 	}
 
-	// Create the tmp directory
-	out, err := gc.Command("mkdir", "-p", tDir)
-	if err != nil {
-		glog.Errorf("mkdir failed %v", err)
-		h.Output <- RunResult{out, err, fmt.Sprintf("mkdir -p %s", tDir)}
-		return h
-	}
-
-	// Copy the binary
-	if copyBin {
-		out, err = gc.CopyToHost(bin, tDir)
-		if err != nil {
-			glog.Errorf("copy-files failed %v", err)
-			h.Output <- RunResult{out, err, fmt.Sprintf("copy-files %s %s", bin, tDir)}
-			return h
-		}
-	}
-
-	// Do the setup
+	// Run the commands in a Go fn so that this method doesn't block when writing to a channel
+	// to report an error
 	go func() {
-		// Start the process
-		out, err = gc.TunnelCommand(sudo, h.LPort, remotePort, tDir, fmt.Sprintf("./%s", f), args...)
+		// Create the tmp directory
+		out, err := gc.Command("mkdir", "-p", tDir)
 		if err != nil {
-			glog.Errorf("command failed %v", err)
-			h.Output <- RunResult{out, err, fmt.Sprintf("%s %s", f, strings.Join(args, " "))}
+			glog.Errorf("mkdir failed %v %s", err, out)
+			h.Output <- RunResult{out, err, fmt.Sprintf("mkdir -p %s", tDir)}
 			return
 		}
+
+		// Copy the binary
+		if copyBin {
+			out, err = gc.CopyToHost(bin, tDir)
+			if err != nil {
+				glog.Errorf("copy-files failed %v %s", err, out)
+				h.Output <- RunResult{out, err, fmt.Sprintf("copy-files %s %s", bin, tDir)}
+				return
+			}
+		}
+
+		// Do the setup
+		go func() {
+			// Start the process
+			out, err = gc.TunnelCommand(sudo, h.LPort, remotePort, tDir, cmd, args...)
+			if err != nil {
+				glog.Errorf("command failed %v %s", err, out)
+				h.Output <- RunResult{out, err, fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))}
+				return
+			}
+		}()
 	}()
 	return h
 }
 
-func (gc *gCloudClientImpl) CopyAndWaitTillHealthy(
+func (gc *gCloudClientImpl) RunAndWaitTillHealthy(
 	sudo bool, copyBin bool,
 	remotePort string, timeout time.Duration, healthUrl string, bin string, args ...string) (*CmdHandle, error) {
-	h := gc.CopyAndRun(sudo, copyBin, remotePort, bin, args...)
+	h := gc.Run(sudo, copyBin, remotePort, bin, args...)
 	eTime := time.Now().Add(timeout)
 	done := false
 	for eTime.After(time.Now()) && !done {
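The restructuring above mostly exists so that Run never blocks the caller: if h.Output is an unbuffered channel (its construction is not shown in this hunk, so that is an assumption), sending an error RunResult from the calling goroutine would stall until the test reads it. A self-contained sketch of that hazard and the goroutine fix:

package main

import "fmt"

type runResult struct{ cmd string }

func main() {
	out := make(chan runResult) // unbuffered, like h.Output is assumed to be
	// Sending directly here would deadlock, because nothing is reading yet.
	// Sending from a goroutine lets the surrounding function return immediately.
	go func() {
		out <- runResult{cmd: "mkdir -p /tmp/node-e2e"} // delivered once the test reads
	}()
	fmt.Printf("reported failure for: %s\n", (<-out).cmd)
}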
@@ -43,13 +43,16 @@ var _ = Describe("Kubelet", func() {
 				Namespace: api.NamespaceDefault,
 			},
 			Spec: api.PodSpec{
+				// Force the Pod to schedule to the node without a scheduler running
 				NodeName: *nodeName,
+				// Don't restart the Pod since it is expected to exit
+				RestartPolicy: api.RestartPolicyNever,
 				Containers: []api.Container{
 					{
-						Image:           "busybox",
+						Image:           "gcr.io/google_containers/busybox",
 						Name:            "busybox",
 						Command:         []string{"echo", "'Hello World'"},
-						ImagePullPolicy: "IfNotPresent",
+						ImagePullPolicy: api.PullIfNotPresent,
 					},
 				},
 			},
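With RestartPolicyNever the busybox container runs exactly once and the pod settles into the Succeeded phase instead of being restarted, which is what the commit message means by never rescheduling the logging test pod. As an illustration only (not part of the commit, and assuming the same unversioned client used elsewhere in these tests), the Retry helper introduced below could wait for that terminal phase:

errs := Retry(time.Minute, time.Second*2, func() error {
	pod, err := cl.Pods(api.NamespaceDefault).Get("busybox")
	if err != nil {
		return err
	}
	if pod.Status.Phase != api.PodSucceeded {
		return fmt.Errorf("pod busybox is in phase %q, want Succeeded", pod.Status.Phase)
	}
	return nil
})
Expect(errs).To(BeEmpty(), "busybox pod never reached the Succeeded phase")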
@@ -59,7 +62,7 @@ var _ = Describe("Kubelet", func() {
 		})
 
 		It("it should print the output to logs", func() {
-			errs := Retry(time.Minute*3, time.Second*2, cl, func(cl *client.Client) error {
+			errs := Retry(time.Minute, time.Second*4, func() error {
 				rc, err := cl.Pods(api.NamespaceDefault).GetLogs("busybox", &api.PodLogOptions{}).Stream()
 				if err != nil {
 					return err
@@ -67,7 +70,9 @@ var _ = Describe("Kubelet", func() {
 				defer rc.Close()
 				buf := new(bytes.Buffer)
 				buf.ReadFrom(rc)
-				Expect(buf.String()).To(Equal("'Hello World'\n"))
+				if buf.String() != "'Hello World'\n" {
+					return fmt.Errorf("Expected %s to match 'Hello World'", buf.String())
+				}
 				return nil
 			})
 			Expect(errs).To(BeEmpty(), fmt.Sprintf("Failed to get Logs"))
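The switch from an inline Expect to a returned error is what makes the retry meaningful: a failed gomega assertion aborts the spec on the spot, so the closure would never get a second attempt. Stated generically (condition() is a stand-in, not a function from this commit):

errs := Retry(time.Minute, time.Second*4, func() error {
	if !condition() {
		// Returning an error tells Retry to wait and call us again.
		return fmt.Errorf("condition not met yet")
	}
	return nil
})
// Assert once, on the aggregate result, after the retry loop has finished.
Expect(errs).To(BeEmpty(), fmt.Sprintf("errors: %v", errs))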
@@ -83,9 +83,9 @@ func main() {
 		go func(host string) {
 			out, err := runTests(host)
 			if err != nil {
-				glog.Infof("Failure Finished Test Suite %s %v", out, err)
+				glog.Infof("Failure Finished Host %s Test Suite %s %v", host, out, err)
 			} else {
-				glog.Infof("Success Finished Test Suite %s", out)
+				glog.Infof("Success Finished Host %s Test Suite %s", host, out)
 			}
 			w.Done()
 		}(h)
@@ -113,20 +113,21 @@ func WaitForUser() {
 	u.Done()
 }
 
-func runTests(host string) ([]byte, error) {
+func runTests(fullhost string) ([]byte, error) {
+	host := strings.Split(fullhost, ".")[0]
 	c := gcloud.NewGCloudClient(host, *zone)
 	// TODO(pwittrock): Come up with something better for bootstrapping the environment.
-	eh, err := c.CopyAndWaitTillHealthy(
-		false, false, "4001", healthyTimeoutDuration, "v2/keys/", "etcd", "--data-dir", "./")
+	eh, err := c.RunAndWaitTillHealthy(
+		false, false, "4001", healthyTimeoutDuration, "v2/keys/", "etcd", "--data-dir", "./", "--name", "e2e-node")
 	defer func() { eh.TearDown() }()
 	if err != nil {
 		return nil, fmt.Errorf("Host %s failed to run command %v", host, err)
 	}
 
 	apiBin := filepath.Join(kubeRoot, *kubeOutputRelPath, "kube-apiserver")
-	ah, err := c.CopyAndWaitTillHealthy(
+	ah, err := c.RunAndWaitTillHealthy(
 		true, true, "8080", healthyTimeoutDuration, "healthz", apiBin, "--service-cluster-ip-range",
-		"10.0.0.1/24", "--insecure-bind-address", "0.0.0.0", "--etcd-servers", "http://localhost:4001",
+		"10.0.0.1/24", "--insecure-bind-address", "0.0.0.0", "--etcd-servers", "http://127.0.0.1:4001",
 		"--cluster-name", "kubernetes", "--v", "2", "--kubelet-port", "10250")
 	defer func() { ah.TearDown() }()
 	if err != nil {
@@ -134,8 +135,8 @@ func runTests(host string) ([]byte, error) {
 	}
 
 	kubeletBin := filepath.Join(kubeRoot, *kubeOutputRelPath, "kubelet")
-	kh, err := c.CopyAndWaitTillHealthy(
-		true, true, "10255", healthyTimeoutDuration, "healthz", kubeletBin, "--api-servers", "http://localhost:8080",
+	kh, err := c.RunAndWaitTillHealthy(
+		true, true, "10255", healthyTimeoutDuration, "healthz", kubeletBin, "--api-servers", "http://127.0.0.1:8080",
 		"--logtostderr", "--address", "0.0.0.0", "--port", "10250")
 	defer func() { kh.TearDown() }()
 	if err != nil {
@@ -150,8 +151,8 @@ func runTests(host string) ([]byte, error) {
 	ginkoTests := filepath.Join(kubeRoot, ginkoTestRelPath)
 	return exec.Command(
 		"ginkgo", ginkoTests, "--",
-		"--kubelet-address", fmt.Sprintf("http://localhost:%s", kh.LPort),
-		"--api-server-address", fmt.Sprintf("http://localhost:%s", ah.LPort),
-		"--node-name", host,
+		"--kubelet-address", fmt.Sprintf("http://127.0.0.1:%s", kh.LPort),
+		"--api-server-address", fmt.Sprintf("http://127.0.0.1:%s", ah.LPort),
+		"--node-name", fullhost,
 		"-logtostderr").CombinedOutput()
 }
@@ -18,16 +18,27 @@ package e2e_node
 
 import (
 	"time"
-
-	client "k8s.io/kubernetes/pkg/client/unversioned"
 )
 
-type RetryFn func(cl *client.Client) error
+// RetryFn represents a retryable test condition. It returns an error if the condition is not met
+// otherwise returns nil for success.
+type RetryFn func() error
 
-func Retry(maxWait time.Duration, wait time.Duration, cl *client.Client, retry RetryFn) []error {
+// Retry retries the RetryFn for a maximum of maxWait time. The wait duration is waited between
+// retries. If the success condition is not met in maxWait time, the list of encountered errors
+// is returned. If successful returns an empty list.
+// Example:
+// Expect(Retry(time.Minute*1, time.Second*2, func() error {
+//   if success {
+//     return nil
+//   } else {
+//     return errors.New("Failed")
+//   }
+// }).To(BeNil(), fmt.Sprintf("Failed"))
+func Retry(maxWait time.Duration, wait time.Duration, retry RetryFn) []error {
 	errs := []error{}
 	for start := time.Now(); time.Now().Before(start.Add(maxWait)); {
-		if err := retry(cl); err != nil {
+		if err := retry(); err != nil {
 			errs = append(errs, err)
 		} else {
 			return []error{}
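The hunk is truncated before the end of the loop, so the following self-contained sketch shows how the whole helper plausibly reads after this change; the time.Sleep(wait) between attempts and the final return of the accumulated errors are reconstructed, not visible in the diff above.

package e2e_node

import "time"

// RetryFn represents a retryable test condition. It returns an error if the
// condition is not met, otherwise returns nil for success.
type RetryFn func() error

// Retry runs retry until it succeeds or maxWait elapses, sleeping wait between
// attempts. On success it returns an empty slice; otherwise it returns every
// error encountered along the way.
func Retry(maxWait time.Duration, wait time.Duration, retry RetryFn) []error {
	errs := []error{}
	for start := time.Now(); time.Now().Before(start.Add(maxWait)); {
		if err := retry(); err != nil {
			errs = append(errs, err)
		} else {
			return []error{}
		}
		time.Sleep(wait)
	}
	return errs
}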