Use fnv hash in the CRI implementation

Leave the old implementations (dockertools/rkt) untouched so that
containers will not be restarted during a kubelet upgrade. For the CRI
implementation (kuberuntime), a container restart is required on
kubelet upgrade.
Yu-Ju Hong 2017-01-25 15:01:41 -08:00
parent 560f52390a
commit 87f1291c19
7 changed files with 26 additions and 16 deletions
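The restart behavior described in the commit message comes from how the kubelet detects spec changes: it records a hash of each container's spec when the container starts (in the docker container name, a container label, or an rkt annotation, as the hunks below show) and later recomputes the hash from the desired spec; a mismatch triggers a restart. The following is a minimal, self-contained sketch of that comparison, not kubelet code: fakeSpec and hashSpec are hypothetical stand-ins, and like the real hashutil.DeepHashObject it hashes a %#v dump of the spec, here with FNV-32a as the CRI path now does.

package main

import (
	"fmt"
	"hash/fnv"
)

// fakeSpec is a hypothetical stand-in for v1.Container; the real code hashes
// the full object with hashutil.DeepHashObject.
type fakeSpec struct {
	Name  string
	Image string
}

// hashSpec mimics the shape of HashContainer: write a %#v dump of the spec
// into an FNV-32a hasher and widen the result to uint64.
func hashSpec(spec fakeSpec) uint64 {
	h := fnv.New32a()
	fmt.Fprintf(h, "%#v", spec)
	return uint64(h.Sum32())
}

func main() {
	desired := fakeSpec{Name: "nginx", Image: "nginx:1.11"}
	// Hash recorded when the container was started (e.g. read back from a label).
	recorded := hashSpec(fakeSpec{Name: "nginx", Image: "nginx:1.10"})

	if recorded != hashSpec(desired) {
		fmt.Println("spec (or hash algorithm) changed: restart the container")
	}
}

Because the hashes already recorded for running CRI containers were computed with the old algorithm, the first comparison after the upgrade fails for every such container, which is why a one-time restart is expected on the CRI path.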

File 1 of 7

@@ -20,6 +20,7 @@ import (
"bytes"
"fmt"
"hash/adler32"
"hash/fnv"
"strings"
"time"
@@ -91,6 +92,15 @@ func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus
// HashContainer returns the hash of the container. It is used to compare
// the running container with its desired spec.
func HashContainer(container *v1.Container) uint64 {
+ hash := fnv.New32a()
+ hashutil.DeepHashObject(hash, *container)
+ return uint64(hash.Sum32())
+ }
+ // HashContainerLegacy returns the hash of the container. It is used to compare
+ // the running container with its desired spec.
+ // TODO: Delete this function after we deprecate dockertools.
+ func HashContainerLegacy(container *v1.Container) uint64 {
hash := adler32.New()
hashutil.DeepHashObject(hash, *container)
return uint64(hash.Sum32())

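The legacy helpers keep adler32 because the hashes recorded by dockertools and rkt (in docker names, labels, and annotations, as the remaining hunks show) were computed with adler32, and the same serialized spec yields a different value under FNV-32a. A small self-contained sketch, not taken from the repository, makes that concrete:

package main

import (
	"fmt"
	"hash/adler32"
	"hash/fnv"
)

func main() {
	// Stand-in for the spew dump of a v1.Container that DeepHashObject
	// feeds into the hasher.
	spec := []byte(`{Name:"nginx", Image:"nginx:1.11"}`)

	a := adler32.New()
	a.Write(spec)

	f := fnv.New32a()
	f.Write(spec)

	fmt.Printf("adler32: %x\n", a.Sum32()) // roughly what HashContainerLegacy would record
	fmt.Printf("fnv32a:  %x\n", f.Sum32()) // roughly what the new HashContainer records
	// The values differ, so recomputing with a different algorithm would make
	// every running container look changed; keeping the legacy function avoids
	// that for dockertools/rkt.
}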
File 2 of 7

@@ -302,7 +302,7 @@ func (p dockerPuller) GetImageRef(image string) (string, error) {
// only occur when instances of the same container in the same pod have the same UID. The
// chance is really slim.
func BuildDockerName(dockerName KubeletContainerName, container *v1.Container) (string, string, string) {
- containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
+ containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16)
stableName := fmt.Sprintf("%s_%s_%s_%s",
containerNamePrefix,
containerName,

File 3 of 7

@@ -1075,7 +1075,7 @@ func (dm *DockerManager) podInfraContainerChanged(pod *v1.Pod, podInfraContainer
ImagePullPolicy: podInfraContainerImagePullPolicy,
Env: dm.podInfraContainerEnv,
}
- return podInfraContainerStatus.Hash != kubecontainer.HashContainer(expectedPodInfraContainer), nil
+ return podInfraContainerStatus.Hash != kubecontainer.HashContainerLegacy(expectedPodInfraContainer), nil
}
// determine if the container root should be a read only filesystem.
@@ -2087,7 +2087,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *v1.Pod, podStatus *kube
// At this point, the container is running and pod infra container is good.
// We will look for changes and check healthiness for the container.
expectedHash := kubecontainer.HashContainer(&container)
expectedHash := kubecontainer.HashContainerLegacy(&container)
hash := containerStatus.Hash
containerChanged := hash != 0 && hash != expectedHash
if containerChanged {

File 4 of 7

@@ -585,7 +585,7 @@ func generatePodInfraContainerHash(pod *v1.Pod) uint64 {
Ports: ports,
ImagePullPolicy: podInfraContainerImagePullPolicy,
}
- return kubecontainer.HashContainer(container)
+ return kubecontainer.HashContainerLegacy(container)
}
// runSyncPod is a helper function to retrieve the running pods from the fake
@@ -855,7 +855,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
fakeDocker.SetFakeRunningContainers([]*FakeContainer{
{
ID: "1234",
Name: "/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&container), 16) + "_foo_new_12345678_0",
Name: "/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&container), 16) + "_foo_new_12345678_0",
},
{
ID: "9876",
@@ -885,14 +885,14 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
},
{
ID: "1234",
Name: "/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0",
Name: "/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[0]), 16) + "_foo_new_12345678_0",
ExitCode: 0,
StartedAt: time.Now(),
FinishedAt: time.Now(),
},
{
ID: "5678",
Name: "/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0",
Name: "/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[1]), 16) + "_foo_new_12345678_0",
ExitCode: 42,
StartedAt: time.Now(),
FinishedAt: time.Now(),
@@ -964,7 +964,7 @@ func TestSyncPodBackoff(t *testing.T) {
Containers: containers,
})
stableId := "k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_new_12345678"
stableId := "k8s_bad." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[1]), 16) + "_podfoo_new_12345678"
dockerContainers := []*FakeContainer{
{
ID: "9876",
@@ -974,13 +974,13 @@ func TestSyncPodBackoff(t *testing.T) {
},
{
ID: "1234",
Name: "/k8s_good." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_podfoo_new_12345678_0",
Name: "/k8s_good." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[0]), 16) + "_podfoo_new_12345678_0",
StartedAt: startTime,
Running: true,
},
{
ID: "5678",
Name: "/k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_new_12345678_0",
Name: "/k8s_bad." + strconv.FormatUint(kubecontainer.HashContainerLegacy(&containers[1]), 16) + "_podfoo_new_12345678_0",
ExitCode: 42,
StartedAt: startTime,
FinishedAt: fakeClock.Now(),

File 5 of 7

@@ -82,7 +82,7 @@ func newLabels(container *v1.Container, pod *v1.Pod, restartCount int, enableCus
}
labels[types.KubernetesContainerNameLabel] = container.Name
- labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
+ labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16)
labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath
labels[kubernetesContainerTerminationMessagePolicyLabel] = string(container.TerminationMessagePolicy)

File 6 of 7

@@ -90,7 +90,7 @@ func TestLabels(t *testing.T) {
PodDeletionGracePeriod: pod.DeletionGracePeriodSeconds,
PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds,
Name: container.Name,
- Hash: strconv.FormatUint(kubecontainer.HashContainer(container), 16),
+ Hash: strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16),
RestartCount: restartCount,
TerminationMessagePath: container.TerminationMessagePath,
PreStopHandler: container.Lifecycle.PreStop,
@@ -113,7 +113,7 @@ func TestLabels(t *testing.T) {
expected.PodTerminationGracePeriod = nil
expected.PreStopHandler = nil
// Because container is changed, the Hash should be updated
- expected.Hash = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
+ expected.Hash = strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16)
labels = newLabels(container, pod, restartCount, false)
containerInfo = getContainerInfoFromLabel(labels)
if !reflect.DeepEqual(containerInfo, expected) {

File 7 of 7

@@ -772,7 +772,7 @@ func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP string, c v1.Container, r
var annotations appctypes.Annotations = []appctypes.Annotation{
{
Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno),
- Value: strconv.FormatUint(kubecontainer.HashContainer(&c), 10),
+ Value: strconv.FormatUint(kubecontainer.HashContainerLegacy(&c), 10),
},
{
Name: *appctypes.MustACIdentifier(types.KubernetesContainerNameLabel),
@@ -923,7 +923,7 @@ func apiPodToruntimePod(uuid string, pod *v1.Pod) *kubecontainer.Pod {
ID: buildContainerID(&containerID{uuid, c.Name}),
Name: c.Name,
Image: c.Image,
- Hash: kubecontainer.HashContainer(c),
+ Hash: kubecontainer.HashContainerLegacy(c),
})
}
return p
@@ -1721,7 +1721,7 @@ func (r *Runtime) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.
restartPod := false
for _, container := range pod.Spec.Containers {
expectedHash := kubecontainer.HashContainer(&container)
expectedHash := kubecontainer.HashContainerLegacy(&container)
c := runningPod.FindContainerByName(container.Name)
if c == nil {