e2e test for addon update

pull/6/head
Marek Biskup 2015-06-15 16:31:17 +02:00
parent e1a03ed737
commit dcc4034d57
6 changed files with 449 additions and 4 deletions

View File

@ -19,6 +19,8 @@
# managed result of that. Start everything below that directory.
KUBECTL=/usr/local/bin/kubectl
ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600}
function create-kubeconfig-secret() {
local -r token=$1
local -r username=$2
@ -118,7 +120,7 @@ function create-resource-from-string() {
# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result of that. Start everything below that directory.
echo "== Kubernetes addon manager started at $(date -Is) =="
echo "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC}=="
# Load the kube-env, which has all the environment variables we care
# about, in a flat yaml format.
@ -167,7 +169,7 @@ done
while true; do
#kube-addon-update.sh must be deployed in the same directory as this file
`dirname $0`/kube-addon-update.sh /etc/kubernetes/addons
sleep 600
sleep $ADDON_CHECK_INTERVAL_SEC
done

View File

@ -45,6 +45,8 @@ type ConditionFunc func() (done bool, err error)
// may be missed if the condition takes too long or the time window is too short.
// If you pass maxTimes = 0, Poll will loop until condition returns true or an
// error.
// Poll always waits the interval before the first check of the condition.
// TODO: create a separate PollImmediate function that does not wait.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
	// Build the ticking wait channel first, then hand it to WaitFor; the
	// first condition check only happens after one full interval elapses.
	waitFunc := poller(interval, timeout)
	return WaitFor(waitFunc, condition)
}

413
test/e2e/addon_update.go Normal file
View File

@ -0,0 +1,413 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"golang.org/x/crypto/ssh"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TODO: it would probably be slightly better to build up the objects
// in the code and then serialize to yaml.

// addon_controller_v1 is the first RC manifest copied onto the master; the
// kubernetes.io/cluster-service label marks it as addon-manager managed.
var addon_controller_v1 = `
apiVersion: v1beta3
kind: ReplicationController
metadata:
  name: addon-test-v1
  namespace: default
  labels:
    k8s-app: addon-test
    version: v1
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 2
  selector:
    k8s-app: addon-test
    version: v1
  template:
    metadata:
      labels:
        k8s-app: addon-test
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - image: gcr.io/google_containers/serve_hostname:1.1
        name: addon-test
        ports:
        - containerPort: 9376
          protocol: TCP
`

// addon_controller_v2 replaces addon_controller_v1 during the "update
// manifests" phase; identical except for the name/version bump to v2.
var addon_controller_v2 = `
apiVersion: v1beta3
kind: ReplicationController
metadata:
  name: addon-test-v2
  namespace: default
  labels:
    k8s-app: addon-test
    version: v2
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 2
  selector:
    k8s-app: addon-test
    version: v2
  template:
    metadata:
      labels:
        k8s-app: addon-test
        version: v2
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - image: gcr.io/google_containers/serve_hostname:1.1
        name: addon-test
        ports:
        - containerPort: 9376
          protocol: TCP
`

// addon_service_v1 is the initial Service fronting the addon-test pods.
var addon_service_v1 = `
apiVersion: v1beta3
kind: Service
metadata:
  name: addon-test
  namespace: default
  labels:
    k8s-app: addon-test
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: addon-test
spec:
  ports:
  - port: 9376
    protocol: TCP
    targetPort: 9376
  selector:
    k8s-app: addon-test
`

// addon_service_v2 is the updated Service: a new name (addon-test-updated)
// and an extra label, so the updater must delete the old and create the new.
var addon_service_v2 = `
apiVersion: v1beta3
kind: Service
metadata:
  name: addon-test-updated
  namespace: default
  labels:
    k8s-app: addon-test
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: addon-test
    newLabel: newValue
spec:
  ports:
  - port: 9376
    protocol: TCP
    targetPort: 9376
  selector:
    k8s-app: addon-test
`

// invalid_addon_controller_v1 lacks the kubernetes.io/cluster-service label
// on its top-level metadata, so the addon updater must NOT create it (the
// template's label alone does not count).
var invalid_addon_controller_v1 = `
apiVersion: v1beta3
kind: ReplicationController
metadata:
  name: invalid-addon-test-v1
  namespace: default
  labels:
    k8s-app: invalid-addon-test
    version: v1
spec:
  replicas: 2
  selector:
    k8s-app: invalid-addon-test
    version: v1
  template:
    metadata:
      labels:
        k8s-app: invalid-addon-test
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - image: gcr.io/google_containers/serve_hostname:1.1
        name: invalid-addon-test
        ports:
        - containerPort: 9376
          protocol: TCP
`

// invalid_addon_service_v1 also lacks the cluster-service label and must
// not be created.
// NOTE(review): the name "ivalid-addon-test" is misspelled; the final Get
// in the test uses the same spelling, so the test is self-consistent, but
// consider fixing both occurrences together.
var invalid_addon_service_v1 = `
apiVersion: v1beta3
kind: Service
metadata:
  name: ivalid-addon-test
  namespace: default
  labels:
    k8s-app: invalid-addon-test
    kubernetes.io/name: invalid-addon-test
spec:
  ports:
  - port: 9377
    protocol: TCP
    targetPort: 9376
  selector:
    k8s-app: invalid-addon-test
`

// Polling cadence for waiting on addon objects to (dis)appear; short,
// because the addon manager is restarted with a 1s check interval below.
var addonTestPollInterval = 3 * time.Second
var addonTestPollTimeout = 1 * time.Minute
var addonNamespace = api.NamespaceDefault // addons are in the default namespace

// stringPair couples a manifest's content with the remote file name it is
// written to on the master.
type stringPair struct {
	data, fileName string
}
// Addon-update e2e: copies manifests into /etc/kubernetes/addons on the
// master over SSH and verifies that kube-addon-update.sh creates, updates,
// and deletes the corresponding API objects.
var _ = Describe("Addon update", func() {

	var dir string
	var sshClient *ssh.Client
	var c *client.Client
	var namespace *api.Namespace

	BeforeEach(func() {
		var err error
		c, err = loadClient()
		Expect(err).NotTo(HaveOccurred())

		sshClient, err = getSSHClient()
		Expect(err).NotTo(HaveOccurred())

		namespace, err = createTestingNS("addon-update-test", c)
		Expect(err).NotTo(HaveOccurred())

		// Restart the addon manager with a 1s check interval so the test
		// does not have to wait the default interval between syncs.
		// do not use "service" command because it clears the environment variables
		sshExecAndVerify(sshClient, "sudo TEST_ADDON_CHECK_INTERVAL_SEC=1 /etc/init.d/kube-addons restart")
	})

	AfterEach(func() {
		if sshClient != nil {
			// restart addon_update with the default options
			sshExec(sshClient, "sudo /etc/init.d/kube-addons restart")
			sshClient.Close()
		}
		// NOTE(review): %q is applied to the *api.Namespace pointer, not its
		// name — this prints the struct; namespace.Name was probably meant.
		if err := c.Namespaces().Delete(namespace.Name); err != nil {
			Failf("Couldn't delete ns %q: %s", namespace, err)
		}
		// Paranoia-- prevent reuse!
		namespace = nil
		c = nil
	})

	// WARNING: the test is not parallel-friendly!
	It("should propagate add-on file changes", func() {
		//these tests are long, so I squeezed several cases in one scenario
		Expect(sshClient).NotTo(BeNil())
		dir = namespace.Name // we use it only to give a unique string for each test execution

		// NOTE(review): this skip runs after BeforeEach has already dialed
		// SSH and restarted the addon manager — on a non-gce provider that
		// setup may itself fail before we ever get here.
		// This test requires SSH, so the provider check should be identical to
		// those tests.
		if !providerIs("gce") {
			Logf(fmt.Sprintf("Skipping test, which is not implemented for %s", testContext.Provider))
			return
		}

		// Stage all manifests in a scratch dir in the master's home first.
		temporaryRemotePathPrefix := "addon-test-dir"
		temporaryRemotePath := temporaryRemotePathPrefix + "/" + dir                  // in home directory on kubernetes-master
		defer sshExec(sshClient, fmt.Sprintf("rm -rf %s", temporaryRemotePathPrefix)) // ignore the result in cleanup
		sshExecAndVerify(sshClient, fmt.Sprintf("mkdir -p %s", temporaryRemotePath))

		rcv1 := "addon-controller-v1.yaml"
		rcv2 := "addon-controller-v2.yaml"
		rcInvalid := "invalid-addon-controller-v1.yaml"

		svcv1 := "addon-service-v1.yaml"
		svcv2 := "addon-service-v2.yaml"
		svcInvalid := "invalid-addon-service-v1.yaml"

		var remoteFiles []stringPair = []stringPair{
			{addon_controller_v1, rcv1},
			{addon_controller_v2, rcv2},
			{addon_service_v1, svcv1},
			{addon_service_v2, svcv2},
			{invalid_addon_controller_v1, rcInvalid},
			{invalid_addon_service_v1, svcInvalid},
		}

		for _, p := range remoteFiles {
			err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644)
			Expect(err).NotTo(HaveOccurred())
		}

		// directory on kubernetes-master watched by the addon manager
		destinationDirPrefix := "/etc/kubernetes/addons/addon-test-dir"
		destinationDir := destinationDirPrefix + "/" + dir

		// cleanup from previous tests
		_, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix))
		Expect(err).NotTo(HaveOccurred())

		defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir))

		By("copy invalid manifests to the destination dir (without kubernetes.io/cluster-service label)")
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcInvalid, destinationDir, rcInvalid))
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcInvalid, destinationDir, svcInvalid))
		// we will verify at the end of the test that the objects weren't created from the invalid manifests

		By("copy new manifests")
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv1, destinationDir, rcv1))
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv1, destinationDir, svcv1))

		waitForServiceInAddonTest(c, "addon-test", true)
		waitForReplicationControllerInAddonTest(c, "addon-test-v1", true)

		By("update manifests")
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv2, destinationDir, rcv2))
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv2, destinationDir, svcv2))
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv1))
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv1))
		/**
		 * Note that we have a small race condition here - the kube-addon-updater
		 * May notice that a new rc/service file appeared, while the old one will still be there.
		 * But it is ok - as long as we don't have rolling update, the result will be the same
		 */

		waitForServiceInAddonTest(c, "addon-test-updated", true)
		waitForReplicationControllerInAddonTest(c, "addon-test-v2", true)

		waitForServiceInAddonTest(c, "addon-test", false)
		waitForReplicationControllerInAddonTest(c, "addon-test-v1", false)

		By("remove manifests")
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2))
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2))

		waitForServiceInAddonTest(c, "addon-test-updated", false)
		// NOTE(review): after removing the v2 manifests this waits on
		// "invalid-addon-test-v1" — "addon-test-v2" looks intended; the
		// invalid RC never existed, so this wait passes vacuously.
		waitForReplicationControllerInAddonTest(c, "invalid-addon-test-v1", false)

		By("verify invalid API addons weren't created")
		_, err = c.ReplicationControllers(addonNamespace).Get("invalid-addon-test-v1")
		Expect(err).To(HaveOccurred())
		_, err = c.Services(addonNamespace).Get("ivalid-addon-test")
		Expect(err).To(HaveOccurred())

		// invalid addons will be deleted by the deferred function
	})
})
// waitForServiceInAddonTest waits, with the addon-test poll settings, until
// the named service exists (exist == true) or is gone (exist == false),
// failing the spec on timeout.
func waitForServiceInAddonTest(c *client.Client, name string, exist bool) {
	err := waitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)
	expectNoError(err)
}
// waitForReplicationControllerInAddonTest waits, with the addon-test poll
// settings, until the named RC exists (exist == true) or is gone
// (exist == false), failing the spec on timeout.
func waitForReplicationControllerInAddonTest(c *client.Client, name string, exist bool) {
	err := waitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)
	expectNoError(err)
}
// TODO marekbiskup 2015-06-11: merge the ssh code into pkg/util/ssh.go after
// kubernetes v1.0 is released. In particular the code of sshExec.
// getSSHClient dials the master host on port 22 as the current user,
// authenticating with the provider-specific signer. The caller owns the
// returned client and must Close it.
func getSSHClient() (*ssh.Client, error) {
	// Get a signer for the provider.
	signer, err := getSigner(testContext.Provider)
	if err != nil {
		return nil, fmt.Errorf("error getting signer for provider %s: '%v'", testContext.Provider, err)
	}
	config := &ssh.ClientConfig{
		User: os.Getenv("USER"),
		Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
	}
	host := getMasterHost() + ":22"
	client, err := ssh.Dial("tcp", host, config)
	if err != nil {
		return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err)
	}
	// Return an explicit nil error (the original returned the stale err
	// variable, which happens to be nil here but is fragile to refactors).
	return client, nil
}
// sshExecAndVerify runs cmd through sshExec and fails the spec unless both
// the SSH transport succeeded and the command exited with status 0.
func sshExecAndVerify(client *ssh.Client, cmd string) {
	_, _, exitCode, err := sshExec(client, cmd)
	Expect(err).NotTo(HaveOccurred())
	Expect(exitCode).To(Equal(0))
}
// sshExec runs cmd on the host behind client and returns the command's
// stdout, stderr, exit code, and an error. A nonzero exit code is NOT
// reported as an error — only failures of the SSH transport itself are.
func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
	Logf(fmt.Sprintf("Executing '%s' on %v", cmd, client.RemoteAddr()))
	session, err := client.NewSession()
	if err != nil {
		return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
	}
	defer session.Close()

	// Capture both output streams while the command runs.
	var stdout, stderr bytes.Buffer
	session.Stdout = &stdout
	session.Stderr = &stderr

	exitCode := 0
	runErr := session.Run(cmd)
	if runErr != nil {
		switch e := runErr.(type) {
		case *ssh.ExitError:
			// The command itself ran but exited nonzero: the SSH layer
			// succeeded, so clear the error and surface the exit code.
			exitCode = e.ExitStatus()
			if exitCode != 0 {
				runErr = nil
			}
		default:
			// Some other kind of error happened (e.g. an IOError);
			// consider the SSH unsuccessful.
			runErr = fmt.Errorf("failed running `%s` on %s: '%v'", cmd, client.RemoteAddr(), runErr)
		}
	}
	return stdout.String(), stderr.String(), exitCode, runErr
}
// writeRemoteFile copies data to dir/fileName on the host behind sshClient
// via the scp sink protocol, creating the file with the given mode.
func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
	Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
	session, err := sshClient.NewSession()
	if err != nil {
		return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)
	}
	defer session.Close()

	size := len(data)
	// Feed scp's stdin protocol concurrently: a "C<mode> <size> <name>"
	// header, the file bytes, then a NUL terminator. Errors are ignored
	// here on purpose — scp would return errors if something goes wrong,
	// and session.Run below reports them.
	go func() {
		pipe, _ := session.StdinPipe()
		defer pipe.Close()
		fmt.Fprintf(pipe, "C%#o %d %s\n", mode, size, fileName)
		io.Copy(pipe, strings.NewReader(data))
		fmt.Fprint(pipe, "\x00")
	}()

	return session.Run(fmt.Sprintf("scp -t %s", dir))
}

View File

@ -70,7 +70,7 @@ var _ = Describe("Etcd failure", func() {
func etcdFailTest(framework Framework, failCommand, fixCommand string, repeat bool) {
// This test requires SSH, so the provider check should be identical to
// those tests.
if !providerIs("gce", "gke") {
if !providerIs("gce") {
By(fmt.Sprintf("Skippingt test, which is not implemented for %s", testContext.Provider))
return
}

View File

@ -94,7 +94,7 @@ func testReboot(c *client.Client, rebootCmd string) {
// there (the limiting factor is the implementation of util.go's
// getSigner(...)).
provider := testContext.Provider
if !providerIs("gce", "gke") {
if !providerIs("gce") {
By(fmt.Sprintf("Skipping reboot test, which is not implemented for %s", provider))
return
}

View File

@ -479,6 +479,34 @@ func waitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error
})
}
// waits until the service appears (exists == true), or disappears (exists == false)
// waitForService polls every interval, until timeout, for the named service
// to appear (exist == true) or disappear (exist == false) in the namespace.
func waitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		_, err := c.Services(namespace).Get(name)
		// NOTE(review): any Get error — not only "not found" — is treated
		// as "service absent"; confirm that is the intended semantics.
		if err != nil {
			Logf("Get service %s in namespace %s failed (%v).", name, namespace, err)
			return !exist, nil
		}
		Logf("Service %s in namespace %s found.", name, namespace)
		return exist, nil
	})
}
// waits until the RC appears (exists == true), or disappears (exists == false)
// waitForReplicationController polls every interval, until timeout, for the
// named RC to appear (exist == true) or disappear (exist == false) in the
// namespace.
func waitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		_, err := c.ReplicationControllers(namespace).Get(name)
		// NOTE(review): any Get error — not only "not found" — is treated
		// as "RC absent"; confirm that is the intended semantics.
		if err != nil {
			Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
			return !exist, nil
		}
		Logf("ReplicationController %s in namespace %s found.", name, namespace)
		return exist, nil
	})
}
// Context for checking pods responses by issuing GETs to them and verifying if the answer with pod name.
type podResponseChecker struct {
c *client.Client