Cleanup prober.prober

pull/6/head
Tim St. Clair 2015-10-21 15:45:34 -07:00
parent 236193a26d
commit 07e9892003
3 changed files with 96 additions and 260 deletions

@@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/probe"
execprobe "k8s.io/kubernetes/pkg/probe/exec"
httprobe "k8s.io/kubernetes/pkg/probe/http"
@@ -68,84 +69,45 @@ func newProber(
}
// probe probes the container.
func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, error) {
func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (results.Result, error) {
var probeSpec *api.Probe
switch probeType {
case readiness:
return pb.probeReadiness(pod, status, container, containerID)
probeSpec = container.ReadinessProbe
case liveness:
return pb.probeLiveness(pod, status, container, containerID)
probeSpec = container.LivenessProbe
default:
return results.Failure, fmt.Errorf("Unknown probe type: %q", probeType)
}
return probe.Unknown, fmt.Errorf("Unknown probe type: %q", probeType)
}
// probeLiveness probes the liveness of a container.
func (pb *prober) probeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, error) {
var live probe.Result
var output string
var err error
p := container.LivenessProbe
if p == nil {
return probe.Success, nil
}
live, output, err = pb.runProbeWithRetries(p, pod, status, container, containerID, maxProbeRetries)
ctrName := fmt.Sprintf("%s:%s", kubecontainer.GetPodFullName(pod), container.Name)
if err != nil || live != probe.Success {
// Liveness failed in one way or another.
ref, ok := pb.refManager.GetRef(containerID)
if !ok {
glog.Warningf("No ref for pod %q - '%v'", containerID, container.Name)
if probeSpec == nil {
glog.Warningf("%s probe for %s is nil", probeType, ctrName)
return results.Success, nil
}
result, output, err := pb.runProbeWithRetries(probeSpec, pod, status, container, containerID, maxProbeRetries)
if err != nil || result != probe.Success {
// Probe failed in one way or another.
ref, hasRef := pb.refManager.GetRef(containerID)
if !hasRef {
glog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName)
}
if err != nil {
glog.V(1).Infof("Liveness probe for %q errored: %v", ctrName, err)
if ok {
pb.recorder.Eventf(ref, "Unhealthy", "Liveness probe errored: %v", err)
glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
if hasRef {
pb.recorder.Eventf(ref, "Unhealthy", "%s probe errored: %v", probeType, err)
}
return probe.Unknown, err
} else { // live != probe.Success
glog.V(1).Infof("Liveness probe for %q failed (%v): %s", ctrName, live, output)
if ok {
pb.recorder.Eventf(ref, "Unhealthy", "Liveness probe failed: %s", output)
}
return live, nil
}
}
glog.V(3).Infof("Liveness probe for %q succeeded", ctrName)
return probe.Success, nil
}
// probeReadiness probes and sets the readiness of a container.
func (pb *prober) probeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, error) {
var ready probe.Result
var output string
var err error
p := container.ReadinessProbe
if p == nil {
return probe.Success, nil
}
ready, output, err = pb.runProbeWithRetries(p, pod, status, container, containerID, maxProbeRetries)
ctrName := fmt.Sprintf("%s:%s", kubecontainer.GetPodFullName(pod), container.Name)
if err != nil || ready == probe.Failure {
// Readiness failed in one way or another.
ref, ok := pb.refManager.GetRef(containerID)
if !ok {
glog.Warningf("No ref for pod '%v' - '%v'", containerID, container.Name)
}
if err != nil {
glog.V(1).Infof("readiness probe for %q errored: %v", ctrName, err)
if ok {
pb.recorder.Eventf(ref, "Unhealthy", "Readiness probe errored: %v", err)
}
} else { // ready != probe.Success
glog.V(1).Infof("Readiness probe for %q failed (%v): %s", ctrName, ready, output)
if ok {
pb.recorder.Eventf(ref, "Unhealthy", "Readiness probe failed: %s", output)
} else { // result != probe.Success
glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
if hasRef {
pb.recorder.Eventf(ref, "Unhealthy", "%s probe failed: %s", probeType, output)
}
}
return probe.Failure, err
return results.Failure, err
}
glog.V(3).Infof("Readiness probe for %q succeeded", ctrName)
return ready, nil
glog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName)
return results.Success, nil
}
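
The results package imported above is not part of this commit's hunks; the sketch below shows a binary result type consistent with the calls to results.Success and results.Failure. The exact declaration, including the String method, is an assumption rather than something taken from the diff.

package results

// Result is the binary probe outcome implied by the prober changes above:
// callers see only Success or Failure, with errors reported separately
// through the returned error value.
type Result bool

const (
    Success Result = true
    Failure Result = false
)

// String is assumed here purely for readable log output.
func (r Result) String() string {
    if r {
        return "Success"
    }
    return "Failure"
}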
// runProbeWithRetries tries to probe the container in a finite loop; it returns the last result if it never succeeds.
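
The body of runProbeWithRetries falls outside the hunks in this diff. Below is a rough sketch of a retry wrapper that matches the call site in probe above, assuming it simply re-runs runProbe up to maxProbeRetries times and reports whatever the final attempt produced.

// Sketch only: retry runProbe until it succeeds or the retry budget is
// exhausted, returning the result, output, and error of the last attempt.
func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) {
    var result probe.Result
    var output string
    var err error
    for i := 0; i < retries; i++ {
        result, output, err = pb.runProbe(p, pod, status, container, containerID)
        if err == nil && result == probe.Success {
            break
        }
    }
    return result, output, err
}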
@@ -167,7 +129,7 @@ func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, con
timeout := time.Duration(p.TimeoutSeconds) * time.Second
if p.Exec != nil {
glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command)
return pb.exec.Probe(pb.newExecInContainer(pod, container, containerID, p.Exec.Command))
return pb.exec.Probe(pb.newExecInContainer(container, containerID, p.Exec.Command))
}
if p.HTTPGet != nil {
scheme := strings.ToLower(string(p.HTTPGet.Scheme))
@@ -193,7 +155,7 @@ func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, con
return pb.tcp.Probe(status.PodIP, port, timeout)
}
glog.Warningf("Failed to find probe builder for container: %v", container)
return probe.Unknown, "", nil
return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", kubecontainer.GetPodFullName(pod), container.Name)
}
func extractPort(param util.IntOrString, container api.Container) (int, error) {
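The body of extractPort also lies outside these hunks. A sketch of how an int-or-name port might be resolved against the container spec follows; it assumes the util.IntOrString of this era exposes Kind/IntVal/StrVal, and the findPortByName helper is purely illustrative, not shown in the commit.

// Sketch only: resolve a numeric or named probe port against the container.
func extractPort(param util.IntOrString, container api.Container) (int, error) {
    port := -1
    switch param.Kind {
    case util.IntstrInt:
        port = param.IntVal
    case util.IntstrString:
        p, err := findPortByName(container, param.StrVal) // assumed helper
        if err != nil {
            return -1, err
        }
        port = p
    default:
        return -1, fmt.Errorf("unknown port kind: %+v", param)
    }
    if port <= 0 || port >= 65536 {
        return port, fmt.Errorf("invalid port number: %v", port)
    }
    return port, nil
}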
@@ -241,7 +203,7 @@ type execInContainer struct {
run func() ([]byte, error)
}
func (p *prober) newExecInContainer(pod *api.Pod, container api.Container, containerID kubecontainer.ContainerID, cmd []string) exec.Cmd {
func (p *prober) newExecInContainer(container api.Container, containerID kubecontainer.ContainerID, cmd []string) exec.Cmd {
return execInContainer{func() ([]byte, error) {
return p.runner.RunInContainer(containerID, cmd)
}}
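
Only the constructor's signature change is visible here; the rest of execInContainer's exec.Cmd implementation presumably just forwards to the stored closure. A sketch under that assumption (the exact exec.Cmd method set is not shown in this diff):

// Sketch only: adapt the container-exec closure to the exec.Cmd interface
// consumed by the exec prober.
func (eic execInContainer) CombinedOutput() ([]byte, error) {
    return eic.run()
}

// SetDir has nothing to do for a command run inside an existing container.
func (eic execInContainer) SetDir(dir string) {}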

@@ -18,11 +18,13 @@ package prober
import (
"errors"
"fmt"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/exec"
@@ -163,210 +165,84 @@ func TestGetTCPAddrParts(t *testing.T) {
}
}
// TestProbeContainer tests the functionality of probeContainer.
// Test cases are:
//
// No probe.
// Only LivenessProbe.
// Only ReadinessProbe.
// Both probes.
//
// Also, for each probe, there will be several cases covering whether the initial
// delay has passed, whether the probe handler will return Success, Failure,
// Unknown or error.
//
// PLEASE READ THE PROBE DOCS BEFORE CHANGING THIS TEST IF YOU ARE UNSURE HOW PROBES ARE SUPPOSED TO WORK:
// (See https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/user-guide/pod-states.md#pod-conditions)
func TestProbeContainer(t *testing.T) {
func TestProbe(t *testing.T) {
prober := &prober{
refManager: kubecontainer.NewRefManager(),
recorder: &record.FakeRecorder{},
}
containerID := kubecontainer.ContainerID{"test", "foobar"}
execProbe := &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
}
tests := []struct {
testContainer api.Container
expectError bool
expectedLiveness probe.Result
expectedReadiness probe.Result
probe *api.Probe
execError bool
expectError bool
execResult probe.Result
expectedResult results.Result
}{
// No probes.
{
testContainer: api.Container{},
expectedLiveness: probe.Success,
expectedReadiness: probe.Success,
{ // No probe
probe: nil,
expectedResult: results.Success,
},
// Only LivenessProbe. expectedReadiness should always be true here.
{
testContainer: api.Container{
LivenessProbe: &api.Probe{},
},
expectedLiveness: probe.Unknown,
expectedReadiness: probe.Success,
{ // No handler
probe: &api.Probe{},
expectError: true,
expectedResult: results.Failure,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedLiveness: probe.Failure,
expectedReadiness: probe.Success,
{ // Probe fails
probe: execProbe,
execResult: probe.Failure,
expectedResult: results.Failure,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedLiveness: probe.Success,
expectedReadiness: probe.Success,
{ // Probe succeeds
probe: execProbe,
execResult: probe.Success,
expectedResult: results.Success,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedLiveness: probe.Unknown,
expectedReadiness: probe.Success,
{ // Probe result is unknown
probe: execProbe,
execResult: probe.Unknown,
expectedResult: results.Failure,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectError: true,
expectedLiveness: probe.Unknown,
expectedReadiness: probe.Success,
},
// // Only ReadinessProbe. expectedLiveness should always be probe.Success here.
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{},
},
expectedLiveness: probe.Success,
expectedReadiness: probe.Unknown,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedLiveness: probe.Success,
expectedReadiness: probe.Success,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedLiveness: probe.Success,
expectedReadiness: probe.Success,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedLiveness: probe.Success,
expectedReadiness: probe.Success,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectError: false,
expectedLiveness: probe.Success,
expectedReadiness: probe.Success,
},
// Both LivenessProbe and ReadinessProbe.
{
testContainer: api.Container{
LivenessProbe: &api.Probe{},
ReadinessProbe: &api.Probe{},
},
expectedLiveness: probe.Unknown,
expectedReadiness: probe.Unknown,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
ReadinessProbe: &api.Probe{},
},
expectedLiveness: probe.Failure,
expectedReadiness: probe.Unknown,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
ReadinessProbe: &api.Probe{
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedLiveness: probe.Success,
expectedReadiness: probe.Success,
{ // Probe has an error
probe: execProbe,
execError: true,
expectError: true,
execResult: probe.Unknown,
expectedResult: results.Failure,
},
}
for i, test := range tests {
if test.expectError {
prober.exec = fakeExecProber{test.expectedLiveness, errors.New("exec error")}
} else {
prober.exec = fakeExecProber{test.expectedLiveness, nil}
}
for _, probeType := range [...]probeType{liveness, readiness} {
testID := fmt.Sprintf("%d-%s", i, probeType)
testContainer := api.Container{}
switch probeType {
case liveness:
testContainer.LivenessProbe = test.probe
case readiness:
testContainer.ReadinessProbe = test.probe
}
if test.execError {
prober.exec = fakeExecProber{test.execResult, errors.New("exec error")}
} else {
prober.exec = fakeExecProber{test.execResult, nil}
}
liveness, err := prober.probeLiveness(&api.Pod{}, api.PodStatus{}, test.testContainer, containerID)
if test.expectError && err == nil {
t.Errorf("[%d] Expected liveness probe error but no error was returned.", i)
}
if !test.expectError && err != nil {
t.Errorf("[%d] Didn't expect liveness probe error but got: %v", i, err)
}
if test.expectedLiveness != liveness {
t.Errorf("[%d] Expected liveness result to be %v but was %v", i, test.expectedLiveness, liveness)
}
// TODO: Test readiness errors
prober.exec = fakeExecProber{test.expectedReadiness, nil}
readiness, err := prober.probeReadiness(&api.Pod{}, api.PodStatus{}, test.testContainer, containerID)
if err != nil {
t.Errorf("[%d] Unexpected readiness probe error: %v", i, err)
}
if test.expectedReadiness != readiness {
t.Errorf("[%d] Expected readiness result to be %v but was %v", i, test.expectedReadiness, readiness)
result, err := prober.probe(probeType, &api.Pod{}, api.PodStatus{}, testContainer, containerID)
if test.expectError && err == nil {
t.Errorf("[%s] Expected probe error but no error was returned.", testID)
}
if !test.expectError && err != nil {
t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err)
}
if test.expectedResult != result {
t.Errorf("[%s] Expected result to be %v but was %v", testID, test.expectedResult, result)
}
}
}
}
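
fakeExecProber is referenced above but not defined in this diff. A minimal stub consistent with how the test constructs it might look like the sketch below, assuming the exec prober interface is Probe(exec.Cmd) (probe.Result, string, error).

// Sketch only: an exec prober that returns a canned result and error,
// regardless of the command it is handed.
type fakeExecProber struct {
    result probe.Result
    err    error
}

func (p fakeExecProber) Probe(_ exec.Cmd) (probe.Result, string, error) {
    return p.result, "", p.err
}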

@@ -24,7 +24,6 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/kubernetes/pkg/util"
)
@@ -164,10 +163,9 @@ func (w *worker) doProbe() (keepGoing bool) {
return true
}
// TODO: Move error handling out of prober.
result, _ := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID)
if result != probe.Unknown {
w.resultsManager.Set(w.containerID, result != probe.Failure, w.pod)
result, err := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID)
if err == nil {
w.resultsManager.Set(w.containerID, result, w.pod)
}
return true
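
The worker now skips the cache update when the probe itself errored and otherwise stores the results.Result directly, which implies the results manager's Set accepts a Result rather than a bool. A sketch of what that interface might look like (the actual definition lives in the results package and is not part of this diff):

// Sketch only: the per-container result cache the worker writes into.
type Manager interface {
    // Set records the latest probe result for a container.
    Set(id kubecontainer.ContainerID, result Result, pod *api.Pod)
    // Get returns the cached result and whether one exists.
    Get(id kubecontainer.ContainerID) (Result, bool)
}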