Move node problem detector test into node e2e.

pull/6/head
Random-Liu 2017-02-17 22:54:52 -08:00
parent 2e12711160
commit 1c8e127973
4 changed files with 66 additions and 108 deletions


@@ -65,7 +65,6 @@ go_library(
         "network_partition.go",
         "networking.go",
         "networking_perf.go",
-        "node_problem_detector.go",
         "nodeoutofdisk.go",
         "opaque_resource.go",
         "pd.go",


@@ -15,6 +15,7 @@ go_library(
         "container.go",
         "doc.go",
         "image_list.go",
+        "node_problem_detector.go",
         "resource_collector.go",
         "simple_mount.go",
         "util.go",
@@ -25,6 +26,8 @@ go_library(
         "//pkg/api/v1:go_default_library",
         "//pkg/apis/componentconfig:go_default_library",
         "//pkg/apis/componentconfig/v1alpha1:go_default_library",
+        "//pkg/client/clientset_generated/clientset:go_default_library",
+        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
         "//pkg/kubelet/api/v1alpha1/stats:go_default_library",
         "//pkg/util/procfs:go_default_library",
         "//test/e2e/common:go_default_library",
@@ -38,7 +41,9 @@ go_library(
        "//vendor:github.com/opencontainers/runc/libcontainer/cgroups",
        "//vendor:k8s.io/apimachinery/pkg/api/errors",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
+       "//vendor:k8s.io/apimachinery/pkg/fields",
        "//vendor:k8s.io/apimachinery/pkg/labels",
+       "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/uuid",


@@ -42,6 +42,7 @@ var NodeImageWhiteList = sets.NewString(
 	"gcr.io/google-containers/stress:v1",
 	"gcr.io/google_containers/busybox:1.24",
 	"gcr.io/google_containers/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff",
+	"gcr.io/google_containers/node-problem-detector:v0.3.0-alpha.1",
 	"gcr.io/google_containers/nginx-slim:0.7",
 	"gcr.io/google_containers/serve_hostname:v1.4",
 	"gcr.io/google_containers/netexec:1.7",


@@ -14,27 +14,24 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package e2e
+package e2e_node

 import (
 	"fmt"
+	"os"
+	"path"
 	"path/filepath"
-	"strconv"
-	"strings"
+	"syscall"
 	"time"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/uuid"
-	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 	"k8s.io/kubernetes/pkg/api/v1"
-	rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
-	"k8s.io/kubernetes/pkg/util/system"
 	"k8s.io/kubernetes/test/e2e/framework"

 	. "github.com/onsi/ginkgo"
@@ -61,32 +58,19 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 		configName = "node-problem-detector-config-" + uid
 		// There is no namespace for Node, event recorder will set default namespace for node events.
 		eventNamespace = metav1.NamespaceDefault
-
-		// this test wants extra permissions. Since the namespace names are unique, we can leave this
-		// lying around so we don't have to race any caches
-		framework.BindClusterRole(f.ClientSet.Rbac(), "cluster-admin", f.Namespace.Name,
-			rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
-		err := framework.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
-			serviceaccount.MakeUsername(f.Namespace.Name, "default"),
-			"", "create", schema.GroupResource{Resource: "pods"}, true)
-		framework.ExpectNoError(err)
 	})

 	// Test system log monitor. We may add other tests if we have more problem daemons in the future.
 	framework.KubeDescribe("SystemLogMonitor", func() {
 		const (
-			// Use test condition to avoid conflict with real node problem detector
+			// Use test condition to avoid changing the real node condition in use.
 			// TODO(random-liu): Now node condition could be arbitrary string, consider whether we need to
 			// add TestCondition when switching to predefined condition list.
 			condition = v1.NodeConditionType("TestCondition")
-			startPattern = "test reboot"

 			// File paths used in the test.
-			logDir       = "/log"
-			logFile      = "test.log"
-			configDir    = "/config"
-			configFile   = "testconfig.json"
+			logFile      = "/log/test.log"
+			configFile   = "/config/testconfig.json"
 			etcLocaltime = "/etc/localtime"

 			// Volumes used in the test.
@@ -104,40 +88,20 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 			permReason2  = "Permanent2"
 			permMessage2 = "permanent error 2"
 		)
-		var source, config, tmpDir string
+		var source, config, tmpLogFile string
 		var lookback time.Duration
-		var node *v1.Node
 		var eventListOptions metav1.ListOptions

-		injectCommand := func(timestamp time.Time, log string, num int) string {
-			var commands []string
-			for i := 0; i < num; i++ {
-				commands = append(commands, fmt.Sprintf("echo \"%s kernel: [0.000000] %s\" >> %s/%s",
-					timestamp.Format(time.Stamp), log, tmpDir, logFile))
-			}
-			return strings.Join(commands, ";")
-		}
 		BeforeEach(func() {
-			framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-			By("Get a non master node to run the pod")
-			nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
-			Expect(err).NotTo(HaveOccurred())
-			node = nil
-			for _, n := range nodes.Items {
-				if !system.IsMasterNode(n.Name) {
-					node = &n
-					break
-				}
-			}
-			Expect(node).NotTo(BeNil())
 			By("Calculate Lookback duration")
-			nodeTime, bootTime, err = getNodeTime(node)
+			var err error
+			nodeTime, bootTime, err = getNodeTime()
 			Expect(err).To(BeNil())
 			// Set lookback duration longer than node up time.
 			// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
 			lookback = nodeTime.Sub(bootTime) + time.Hour

-			// Randomize the source name to avoid conflict with real node problem detector
+			// Randomize the source name
 			source = "kernel-monitor-" + uid
 			config = `
 			{
@@ -147,7 +111,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 				"message": "kernel: \\[.*\\] (.*)",
 				"timestampFormat": "` + time.Stamp + `"
 			},
-			"logPath": "` + filepath.Join(logDir, logFile) + `",
+			"logPath": "` + logFile + `",
 			"lookback": "` + lookback.String() + `",
 			"bufferSize": 10,
 			"source": "` + source + `",
@@ -181,30 +145,29 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 			By("Generate event list options")
 			selector := fields.Set{
 				"involvedObject.kind":      "Node",
-				"involvedObject.name":      node.Name,
+				"involvedObject.name":      framework.TestContext.NodeName,
 				"involvedObject.namespace": metav1.NamespaceAll,
 				"source":                   source,
 			}.AsSelector().String()
 			eventListOptions = metav1.ListOptions{FieldSelector: selector}
 			By("Create the test log file")
-			tmpDir = "/tmp/" + name
-			cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile)
-			Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed())
+			tmpLogFile = filepath.Join("/tmp", name, path.Base(logFile))
+			os.MkdirAll(path.Dir(tmpLogFile), 755)
+			_, err = os.Create(tmpLogFile)
+			Expect(err).NotTo(HaveOccurred())
 			By("Create config map for the node problem detector")
 			_, err = c.Core().ConfigMaps(ns).Create(&v1.ConfigMap{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: configName,
-				},
-				Data: map[string]string{configFile: config},
+				ObjectMeta: metav1.ObjectMeta{Name: configName},
+				Data:       map[string]string{path.Base(configFile): config},
 			})
 			Expect(err).NotTo(HaveOccurred())
 			By("Create the node problem detector")
-			_, err = c.Core().Pods(ns).Create(&v1.Pod{
+			f.PodClient().CreateSync(&v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: name,
 				},
 				Spec: v1.PodSpec{
-					NodeName:        node.Name,
+					HostNetwork:     true,
 					SecurityContext: &v1.PodSecurityContext{},
 					Volumes: []v1.Volume{
 						{
@@ -218,7 +181,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 						{
 							Name: logVolume,
 							VolumeSource: v1.VolumeSource{
-								HostPath: &v1.HostPathVolumeSource{Path: tmpDir},
+								HostPath: &v1.HostPathVolumeSource{Path: path.Dir(tmpLogFile)},
 							},
 						},
 						{
@@ -230,10 +193,9 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 					},
 					Containers: []v1.Container{
 						{
-							Name:            name,
-							Image:           image,
-							Command:         []string{"/node-problem-detector", "--system-log-monitors=" + filepath.Join(configDir, configFile), "--logtostderr"},
-							ImagePullPolicy: v1.PullAlways,
+							Name:    name,
+							Image:   image,
+							Command: []string{"/node-problem-detector", "--logtostderr", "--system-log-monitors=" + configFile, fmt.Sprintf("--apiserver-override=%s?inClusterConfig=false", framework.TestContext.Host)},
 							Env: []v1.EnvVar{
 								{
 									Name: "NODE_NAME",
@@ -248,7 +210,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 							VolumeMounts: []v1.VolumeMount{
 								{
 									Name:      logVolume,
-									MountPath: logDir,
+									MountPath: path.Dir(logFile),
 								},
 								{
 									Name:      localtimeVolume,
@@ -256,16 +218,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 								},
 								{
 									Name:      configVolume,
-									MountPath: configDir,
+									MountPath: path.Dir(configFile),
 								},
 							},
 						},
 					},
 				},
 			})
-			Expect(err).NotTo(HaveOccurred())
-			By("Wait for node problem detector running")
-			Expect(f.WaitForPodRunning(name)).To(Succeed())
 		})

 		It("should generate node condition and events for corresponding errors", func() {
@@ -357,8 +316,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 				By(test.description)
 				if test.messageNum > 0 {
 					By(fmt.Sprintf("Inject %d logs: %q", test.messageNum, test.message))
-					cmd := injectCommand(test.timestamp, test.message, test.messageNum)
-					Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed())
+					err := injectLog(tmpLogFile, test.timestamp, test.message, test.messageNum)
+					Expect(err).NotTo(HaveOccurred())
 				}

 				By(fmt.Sprintf("Wait for %d events generated", test.events))
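Aside: the injected lines only produce events because they fit the monitor config earlier in this diff; the leading characters carry a time.Stamp timestamp and the rest matches the "kernel: \\[.*\\] (.*)" message pattern. A minimal, self-contained Go sketch of that pairing (illustrative only, not part of the commit):

package main

import (
	"fmt"
	"regexp"
	"time"
)

func main() {
	// Same shape as injectLog's output: "<time.Stamp> kernel: [0.000000] <message>".
	line := fmt.Sprintf("%s kernel: [0.000000] %s", time.Now().Format(time.Stamp), "temporary error")

	// The "message" pattern from the monitor config; per the config shown,
	// capture group 1 is what ends up in the generated event.
	msgRe := regexp.MustCompile(`kernel: \[.*\] (.*)`)
	fmt.Println("message:", msgRe.FindStringSubmatch(line)[1])

	// time.Stamp ("Jan _2 15:04:05") is exactly 15 bytes, so the line's
	// prefix parses back with the config's "timestampFormat".
	ts, err := time.Parse(time.Stamp, line[:15])
	fmt.Println("timestamp:", ts, "err:", err)
}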
@@ -372,11 +331,11 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 				By(fmt.Sprintf("Make sure node condition %q is set", condition))
 				Eventually(func() error {
-					return verifyCondition(c.Core().Nodes(), node.Name, condition, test.conditionType, test.conditionReason, test.conditionMessage)
+					return verifyNodeCondition(c.Core().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage)
 				}, pollTimeout, pollInterval).Should(Succeed())

 				By(fmt.Sprintf("Make sure node condition %q is stable", condition))
 				Consistently(func() error {
-					return verifyCondition(c.Core().Nodes(), node.Name, condition, test.conditionType, test.conditionReason, test.conditionMessage)
+					return verifyNodeCondition(c.Core().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage)
 				}, pollConsistent, pollInterval).Should(Succeed())
 			}
 		})
@@ -389,7 +348,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 				framework.Logf("Node Problem Detector logs:\n %s", log)
 			}
 			By("Delete the node problem detector")
-			c.Core().Pods(ns).Delete(name, metav1.NewDeleteOptions(0))
+			f.PodClient().Delete(name, metav1.NewDeleteOptions(0))
 			By("Wait for the node problem detector to disappear")
 			Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
 			By("Delete the config map")
@@ -398,49 +357,43 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
 			Expect(c.Core().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
 			By("Clean up the node condition")
 			patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
-			c.Core().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do()
+			c.Core().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do()
 			By("Clean up the temporary directory")
-			framework.IssueSSHCommand(fmt.Sprintf("rm -r %s", tmpDir), framework.TestContext.Provider, node)
+			Expect(os.RemoveAll(path.Dir(tmpLogFile))).To(Succeed())
 		})
 	})
 })

-// getNodeTime gets node boot time and current time by running ssh command on the node.
-func getNodeTime(node *v1.Node) (time.Time, time.Time, error) {
-	nodeIP := framework.GetNodeExternalIP(node)
+// injectLog injects kernel log into specified file.
+func injectLog(file string, timestamp time.Time, log string, num int) error {
+	f, err := os.OpenFile(file, os.O_RDWR|os.O_APPEND, 0666)
+	if err != nil {
+		return err
+	}
+	for i := 0; i < num; i++ {
+		_, err := f.WriteString(fmt.Sprintf("%s kernel: [0.000000] %s\n", timestamp.Format(time.Stamp), log))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getNodeTime gets node boot time and current time.
+func getNodeTime() (time.Time, time.Time, error) {
 	// Get node current time.
-	result, err := framework.SSH("date '+%FT%T.%N%:z'", nodeIP, framework.TestContext.Provider)
-	if err != nil {
-		return time.Time{}, time.Time{}, fmt.Errorf("failed to run ssh command to get node time: %v", err)
-	}
-	if result.Code != 0 {
-		return time.Time{}, time.Time{}, fmt.Errorf("failed to run ssh command with error code: %d", result.Code)
-	}
-	timestamp := strings.TrimSpace(result.Stdout)
-	nodeTime, err := time.Parse(time.RFC3339, timestamp)
-	if err != nil {
-		return time.Time{}, time.Time{}, fmt.Errorf("failed to parse node time %q: %v", timestamp, err)
-	}
+	nodeTime := time.Now()

 	// Get system uptime.
-	result, err = framework.SSH(`cat /proc/uptime | cut -d " " -f 1`, nodeIP, framework.TestContext.Provider)
-	if err != nil {
-		return time.Time{}, time.Time{}, fmt.Errorf("failed to run ssh command to get node boot time: %v", err)
-	}
-	if result.Code != 0 {
-		return time.Time{}, time.Time{}, fmt.Errorf("failed to run ssh command with error code: %d, stdout: %q, stderr: %q",
-			result.Code, result.Stdout, result.Stderr)
-	}
-	uptime, err := strconv.ParseFloat(strings.TrimSpace(result.Stdout), 64)
-	if err != nil {
-		return time.Time{}, time.Time{}, fmt.Errorf("failed to parse node uptime %q: %v", result.Stdout, err)
-	}
+	var info syscall.Sysinfo_t
+	if err := syscall.Sysinfo(&info); err != nil {
+		return time.Time{}, time.Time{}, err
+	}

 	// Get node boot time. NOTE that because we get node current time before uptime, the boot time
 	// calculated will be a little earlier than the real boot time. This won't affect the correctness
 	// of the test result.
-	bootTime := nodeTime.Add(-time.Duration(uptime * float64(time.Second)))
+	bootTime := nodeTime.Add(-time.Duration(info.Uptime * int64(time.Second)))
 	return nodeTime, bootTime, nil
 }
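Note: this hunk is the core of the SSH removal. Instead of shelling out to date and /proc/uptime on a remote machine, the test now derives boot time locally from sysinfo(2), which works because node e2e runs on the node under test. A self-contained sketch of the same technique (Linux-only; syscall.Sysinfo is not available on other platforms):

package main

import (
	"fmt"
	"syscall"
	"time"
)

func main() {
	now := time.Now()
	var info syscall.Sysinfo_t
	if err := syscall.Sysinfo(&info); err != nil { // thin wrapper around sysinfo(2)
		panic(err)
	}
	// info.Uptime is whole seconds since boot; subtracting it from the current
	// time approximates boot time to within a second, which the lookback
	// calculation above tolerates by padding with a full hour.
	bootTime := now.Add(-time.Duration(info.Uptime) * time.Second)
	fmt.Printf("up %d seconds, booted around %s\n", info.Uptime, bootTime.Format(time.RFC3339))
}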
@@ -475,9 +428,9 @@ func verifyNoEvents(e coreclientset.EventInterface, options metav1.ListOptions)
 	return nil
 }

-// verifyCondition verifies specific node condition is generated, if reason and message are empty, they will not be checked
-func verifyCondition(n coreclientset.NodeInterface, nodeName string, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error {
-	node, err := n.Get(nodeName, metav1.GetOptions{})
+// verifyNodeCondition verifies specific node condition is generated, if reason and message are empty, they will not be checked
+func verifyNodeCondition(n coreclientset.NodeInterface, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error {
+	node, err := n.Get(framework.TestContext.NodeName, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
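The last hunk is truncated after the Get call. The remainder of verifyNodeCondition presumably scans node.Status.Conditions and compares fields, roughly like the hypothetical helper below (checkCondition is not a name from the commit):

package e2e_node

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// checkCondition sketches the comparison verifyNodeCondition likely performs:
// find the condition by type, then require the expected status, and the
// expected reason/message only when they are non-empty.
func checkCondition(node *v1.Node, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error {
	for _, c := range node.Status.Conditions {
		if c.Type != condition {
			continue
		}
		if c.Status != status {
			return fmt.Errorf("condition status %q does not match expected %q", c.Status, status)
		}
		if reason != "" && c.Reason != reason {
			return fmt.Errorf("condition reason %q does not match expected %q", c.Reason, reason)
		}
		if message != "" && c.Message != message {
			return fmt.Errorf("condition message %q does not match expected %q", c.Message, message)
		}
		return nil
	}
	return fmt.Errorf("node condition %q not found", condition)
}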