/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package prober

import (
	"sync"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/tools/record"
	"k8s.io/component-base/metrics"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/features"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/prober/results"
	"k8s.io/kubernetes/pkg/kubelet/status"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
)

// ProberResults stores the cumulative number of probes by result as prometheus metrics.
var ProberResults = metrics.NewCounterVec(
	&metrics.CounterOpts{
		Subsystem:      "prober",
		Name:           "probe_total",
		Help:           "Cumulative number of a liveness, readiness or startup probe for a container by result.",
		StabilityLevel: metrics.ALPHA,
	},
	[]string{"probe_type",
		"result",
		"container",
		"pod",
		"namespace",
		"pod_uid"},
)
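
// The sketch below is illustrative only and not part of the upstream file: it shows how a probe
// worker could bump ProberResults with the label set declared above. The helper name
// recordProbeResultSketch and its parameters are assumptions; the real bookkeeping lives in the
// prober workers.
func recordProbeResultSketch(t probeType, pod *v1.Pod, containerName, result string) {
	ProberResults.With(metrics.Labels{
		"probe_type": t.String(), // "Liveness", "Readiness" or "Startup"
		"result":     result,     // probeResultSuccessful, probeResultFailed or probeResultUnknown
		"container":  containerName,
		"pod":        pod.Name,
		"namespace":  pod.Namespace,
		"pod_uid":    string(pod.UID),
	}).Inc()
}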

// Manager manages pod probing. It creates a probe "worker" for every container that specifies a
// probe (AddPod). The worker periodically probes its assigned container and caches the results. The
// manager uses the cached probe results to set the appropriate Ready state in the PodStatus when
// requested (UpdatePodStatus). Updating probe parameters is not currently supported.
// TODO: Move liveness probing out of the runtime, to here.
type Manager interface {
	// AddPod creates new probe workers for every container probe. This should be called for every
	// pod created.
	AddPod(pod *v1.Pod)

	// RemovePod handles cleaning up the removed pod state, including terminating probe workers and
	// deleting cached results.
	RemovePod(pod *v1.Pod)

	// CleanupPods handles cleaning up pods which should no longer be running.
	// It takes a map of "desired pods" which should not be cleaned up.
	CleanupPods(desiredPods map[types.UID]sets.Empty)

	// UpdatePodStatus modifies the given PodStatus with the appropriate Ready state for each
	// container based on container running status, cached probe results and worker states.
	UpdatePodStatus(types.UID, *v1.PodStatus)

	// Start starts the Manager sync loops.
	Start()
}
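
// The function below is an illustrative sketch, not part of the upstream kubelet: it shows the
// intended call pattern for Manager as described in the interface comment. The function name and
// its parameters are assumptions made for the example; in the kubelet these calls are spread
// across the pod sync and status machinery.
func manageProbesSketch(m Manager, pod *v1.Pod, podStatus *v1.PodStatus) {
	m.Start()                             // start the readiness/startup update loops (call once)
	m.AddPod(pod)                         // spawn one worker per configured probe on the pod
	m.UpdatePodStatus(pod.UID, podStatus) // fold cached probe results into the status before publishing
	m.RemovePod(pod)                      // stop the pod's workers and drop cached results when it is deleted
}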

type manager struct {
	// Map of active workers for probes
	workers map[probeKey]*worker
	// Lock for accessing & mutating workers
	workerLock sync.RWMutex

	// The statusManager cache provides pod IP and container IDs for probing.
	statusManager status.Manager

	// readinessManager manages the results of readiness probes
	readinessManager results.Manager

	// livenessManager manages the results of liveness probes
	livenessManager results.Manager

	// startupManager manages the results of startup probes
	startupManager results.Manager

	// prober executes the probe actions.
	prober *prober
}

// NewManager creates a Manager for pod probing.
func NewManager(
	statusManager status.Manager,
	livenessManager results.Manager,
	startupManager results.Manager,
	runner kubecontainer.ContainerCommandRunner,
	refManager *kubecontainer.RefManager,
	recorder record.EventRecorder) Manager {

	prober := newProber(runner, refManager, recorder)
	readinessManager := results.NewManager()
	return &manager{
		statusManager:    statusManager,
		prober:           prober,
		readinessManager: readinessManager,
		livenessManager:  livenessManager,
		startupManager:   startupManager,
		workers:          make(map[probeKey]*worker),
	}
}
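
// The constructor sketch below is illustrative only and not part of the upstream file: it shows one
// way NewManager's dependencies could be wired together. The function name is an assumption;
// statusManager, runner and recorder are assumed to be supplied by the caller (in the kubelet they
// come from the Kubelet itself), and the liveness and startup result caches here are simply fresh
// results.Manager instances.
func wireProbeManagerSketch(
	statusManager status.Manager,
	runner kubecontainer.ContainerCommandRunner,
	recorder record.EventRecorder) Manager {
	livenessManager := results.NewManager()
	startupManager := results.NewManager()
	return NewManager(
		statusManager,
		livenessManager,
		startupManager,
		runner,
		kubecontainer.NewRefManager(),
		recorder,
	)
}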

// Start syncing probe status. This should only be called once.
func (m *manager) Start() {
	// Start syncing readiness.
	go wait.Forever(m.updateReadiness, 0)
	// Start syncing startup.
	go wait.Forever(m.updateStartup, 0)
}

// Key uniquely identifying container probes
type probeKey struct {
	podUID        types.UID
	containerName string
	probeType     probeType
}

// Type of probe (liveness, readiness or startup)
type probeType int

const (
	liveness probeType = iota
	readiness
	startup

	probeResultSuccessful string = "successful"
	probeResultFailed     string = "failed"
	probeResultUnknown    string = "unknown"
)

// For debugging.
func (t probeType) String() string {
	switch t {
	case readiness:
		return "Readiness"
	case liveness:
		return "Liveness"
	case startup:
		return "Startup"
	default:
		return "UNKNOWN"
	}
}

func (m *manager) AddPod(pod *v1.Pod) {
	m.workerLock.Lock()
	defer m.workerLock.Unlock()

	key := probeKey{podUID: pod.UID}
	for _, c := range pod.Spec.Containers {
		key.containerName = c.Name

		if c.StartupProbe != nil && utilfeature.DefaultFeatureGate.Enabled(features.StartupProbe) {
			key.probeType = startup
			if _, ok := m.workers[key]; ok {
				klog.Errorf("Startup probe already exists! %v - %v",
					format.Pod(pod), c.Name)
				return
			}
			w := newWorker(m, startup, pod, c)
			m.workers[key] = w
			go w.run()
		}

		if c.ReadinessProbe != nil {
			key.probeType = readiness
			if _, ok := m.workers[key]; ok {
				klog.Errorf("Readiness probe already exists! %v - %v",
					format.Pod(pod), c.Name)
				return
			}
			w := newWorker(m, readiness, pod, c)
			m.workers[key] = w
			go w.run()
		}

		if c.LivenessProbe != nil {
			key.probeType = liveness
			if _, ok := m.workers[key]; ok {
				klog.Errorf("Liveness probe already exists! %v - %v",
					format.Pod(pod), c.Name)
				return
			}
			w := newWorker(m, liveness, pod, c)
			m.workers[key] = w
			go w.run()
		}
	}
}

func (m *manager) RemovePod(pod *v1.Pod) {
	m.workerLock.RLock()
	defer m.workerLock.RUnlock()

	key := probeKey{podUID: pod.UID}
	for _, c := range pod.Spec.Containers {
		key.containerName = c.Name
		for _, probeType := range [...]probeType{readiness, liveness, startup} {
			key.probeType = probeType
			if worker, ok := m.workers[key]; ok {
				worker.stop()
			}
		}
	}
}

func (m *manager) CleanupPods(desiredPods map[types.UID]sets.Empty) {
	m.workerLock.RLock()
	defer m.workerLock.RUnlock()

	for key, worker := range m.workers {
		if _, ok := desiredPods[key.podUID]; !ok {
			worker.stop()
		}
	}
}

func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *v1.PodStatus) {
	for i, c := range podStatus.ContainerStatuses {
		var started bool
		if c.State.Running == nil {
			started = false
		} else if !utilfeature.DefaultFeatureGate.Enabled(features.StartupProbe) {
			// The container is running; assume it is started if the StartupProbe feature is disabled.
			started = true
		} else if result, ok := m.startupManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok {
			started = result == results.Success
		} else {
			// Check whether there is a startup probe which hasn't run yet.
			_, exists := m.getWorker(podUID, c.Name, startup)
			started = !exists
		}
		podStatus.ContainerStatuses[i].Started = &started

		if started {
			var ready bool
			if c.State.Running == nil {
				ready = false
			} else if result, ok := m.readinessManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok {
				ready = result == results.Success
			} else {
				// Check whether there is a readiness probe which hasn't run yet.
				_, exists := m.getWorker(podUID, c.Name, readiness)
				ready = !exists
			}
			podStatus.ContainerStatuses[i].Ready = ready
		}
	}
	// init containers are ready if they have exited with success or if a readiness probe has
	// succeeded.
	for i, c := range podStatus.InitContainerStatuses {
		var ready bool
		if c.State.Terminated != nil && c.State.Terminated.ExitCode == 0 {
			ready = true
		}
		podStatus.InitContainerStatuses[i].Ready = ready
	}
}

func (m *manager) getWorker(podUID types.UID, containerName string, probeType probeType) (*worker, bool) {
	m.workerLock.RLock()
	defer m.workerLock.RUnlock()
	worker, ok := m.workers[probeKey{podUID, containerName, probeType}]
	return worker, ok
}

// Called by the worker after exiting.
func (m *manager) removeWorker(podUID types.UID, containerName string, probeType probeType) {
	m.workerLock.Lock()
	defer m.workerLock.Unlock()
	delete(m.workers, probeKey{podUID, containerName, probeType})
}

// workerCount returns the total number of probe workers. For testing.
func (m *manager) workerCount() int {
	m.workerLock.RLock()
	defer m.workerLock.RUnlock()
	return len(m.workers)
}

// updateReadiness blocks on the readiness results channel and pushes the next update into the
// status manager.
func (m *manager) updateReadiness() {
	update := <-m.readinessManager.Updates()

	ready := update.Result == results.Success
	m.statusManager.SetContainerReadiness(update.PodUID, update.ContainerID, ready)
}

// updateStartup blocks on the startup results channel and pushes the next update into the
// status manager.
func (m *manager) updateStartup() {
	update := <-m.startupManager.Updates()

	started := update.Result == results.Success
	m.statusManager.SetContainerStartup(update.PodUID, update.ContainerID, started)
}