k3s/pkg/kubelet/images/image_manager.go


/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package images

import (
"fmt"
dockerref "github.com/docker/distribution/reference"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/util/parsers"
)

// imageManager provides the functionalities for image pulling.
type imageManager struct {
// recorder is used to emit image lifecycle events on the owning pod.
recorder record.EventRecorder
// imageService talks to the container runtime to inspect and pull images.
imageService kubecontainer.ImageService
// backOff tracks recent pull failures per pod and image to rate-limit retries.
backOff *flowcontrol.Backoff
// It will check the presence of the image, and report the 'image pulling', 'image pulled' events correspondingly.
puller imagePuller
}

var _ ImageManager = &imageManager{}

// NewImageManager instantiates a new ImageManager object.
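// The serialized flag selects serial versus parallel image pulling, and qps/burst
// throttle how quickly pulls are dispatched to the image service.
//
// A rough usage sketch, assuming the recorder and runtime-backed image service are
// supplied by the caller (the variable names below are illustrative only):
//
//	backOff := flowcontrol.NewBackOff(10*time.Second, 300*time.Second)
//	manager := NewImageManager(recorder, runtimeImageService, backOff, true /* serialized */, 5.0, 10)
//	imageRef, msg, err := manager.EnsureImageExists(pod, container, pullSecrets, sandboxConfig)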
func NewImageManager(recorder record.EventRecorder, imageService kubecontainer.ImageService, imageBackOff *flowcontrol.Backoff, serialized bool, qps float32, burst int) ImageManager {
imageService = throttleImagePulling(imageService, qps, burst)
var puller imagePuller
if serialized {
puller = newSerialImagePuller(imageService)
} else {
puller = newParallelImagePuller(imageService)
}
return &imageManager{
recorder: recorder,
imageService: imageService,
backOff: imageBackOff,
puller: puller,
}
}

// shouldPullImage returns whether we should pull an image according to
// the presence and pull policy of the image.
func shouldPullImage(container *v1.Container, imagePresent bool) bool {
if container.ImagePullPolicy == v1.PullNever {
return false
}
if container.ImagePullPolicy == v1.PullAlways ||
(container.ImagePullPolicy == v1.PullIfNotPresent && (!imagePresent)) {
return true
}
return false
}

// logIt records an event using ref and msg when ref is non-nil; otherwise it
// logs prefix and msg through the supplied klog logging function logFn.
func (m *imageManager) logIt(ref *v1.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
if ref != nil {
m.recorder.Event(ref, eventtype, event, msg)
} else {
logFn(fmt.Sprint(prefix, " ", msg))
}
}

// EnsureImageExists pulls the image for the specified pod and container, and returns
// (imageRef, error message, error).
func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) {
logPrefix := fmt.Sprintf("%s/%s", pod.Name, container.Image)
ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
klog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
}
// If the image contains no tag or digest, a default tag should be applied.
image, err := applyDefaultImageTag(container.Image)
if err != nil {
msg := fmt.Sprintf("Failed to apply default image tag %q: %v", container.Image, err)
m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning)
return "", msg, ErrInvalidImageName
}
spec := kubecontainer.ImageSpec{Image: image}
imageRef, err := m.imageService.GetImageRef(spec)
if err != nil {
msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning)
return "", msg, ErrImageInspect
}
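// Decide, from the container's pull policy and whether the image is present locally, if a pull is needed at all.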
present := imageRef != ""
if !shouldPullImage(container, present) {
if present {
msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, klog.Info)
return imageRef, "", nil
}
msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, klog.Warning)
return "", msg, ErrImageNeverPull
}
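// The backoff key is scoped to the pod UID and image, so repeated pull failures
// only delay retries for that same pod/image pair.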
backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image)
if m.backOff.IsInBackOffSinceUpdate(backOffKey, m.backOff.Clock.Now()) {
msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
m.logIt(ref, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, klog.Info)
return "", msg, ErrImagePullBackOff
}
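// Not in backoff: emit the "Pulling" event, hand the pull off to the configured
// puller (serial or parallel), and block until the result arrives on pullChan.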
m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("Pulling image %q", container.Image), klog.Info)
pullChan := make(chan pullResult)
m.puller.pullImage(spec, pullSecrets, pullChan, podSandboxConfig)
imagePullResult := <-pullChan
if imagePullResult.err != nil {
m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), klog.Warning)
m.backOff.Next(backOffKey, m.backOff.Clock.Now())
if imagePullResult.err == ErrRegistryUnavailable {
msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image)
return "", msg, imagePullResult.err
}
return "", imagePullResult.err.Error(), ErrImagePull
}
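// The pull succeeded: record the "Pulled" event, prune expired backoff entries,
// and return the resolved image ref.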
m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), klog.Info)
m.backOff.GC()
return imagePullResult.imageRef, "", nil
}

// applyDefaultImageTag parses a docker image string; if it doesn't contain any
// tag or digest, a default tag is applied.
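// For example, assuming parsers.DefaultImageTag is "latest", "busybox" becomes
// "busybox:latest", while "busybox:1.31" or a digest reference is returned unchanged.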
func applyDefaultImageTag(image string) (string, error) {
named, err := dockerref.ParseNormalizedNamed(image)
if err != nil {
return "", fmt.Errorf("couldn't parse image reference %q: %v", image, err)
}
_, isTagged := named.(dockerref.Tagged)
_, isDigested := named.(dockerref.Digested)
if !isTagged && !isDigested {
// we just concatenate the image name with the default tag here instead
// of using dockerref.WithTag(named, ...) because that would cause the
// image to be fully qualified as docker.io/$name if it's a short name
// (e.g. just busybox). We don't want that to happen to keep the CRI
// agnostic wrt image names and default hostnames.
image = image + ":" + parsers.DefaultImageTag
}
return image, nil
}