mirror of https://github.com/k3s-io/k3s
Merge pull request #74809 from oxddr/secrets-and-maps
Fix secret/configmap management for terminated pods
commit 243e740885
@@ -168,20 +168,40 @@ func (pm *basicManager) UpdatePod(pod *v1.Pod) {
 	}
 }
 
+func isPodInTerminatedState(pod *v1.Pod) bool {
+	return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
+}
+
 // updatePodsInternal replaces the given pods in the current state of the
 // manager, updating the various indices. The caller is assumed to hold the
 // lock.
 func (pm *basicManager) updatePodsInternal(pods ...*v1.Pod) {
 	for _, pod := range pods {
 		if pm.secretManager != nil {
-			// TODO: Consider detecting only status update and in such case do
-			// not register pod, as it doesn't really matter.
-			pm.secretManager.RegisterPod(pod)
+			if isPodInTerminatedState(pod) {
+				// Pods that are in terminated state and no longer running can be
+				// ignored as they no longer require access to secrets.
+				// It is especially important in watch-based manager, to avoid
+				// unnecessary watches for terminated pods waiting for GC.
+				pm.secretManager.UnregisterPod(pod)
+			} else {
+				// TODO: Consider detecting only status update and in such case do
+				// not register pod, as it doesn't really matter.
+				pm.secretManager.RegisterPod(pod)
+			}
 		}
 		if pm.configMapManager != nil {
-			// TODO: Consider detecting only status update and in such case do
-			// not register pod, as it doesn't really matter.
-			pm.configMapManager.RegisterPod(pod)
+			if isPodInTerminatedState(pod) {
+				// Pods that are in terminated state and no longer running can be
+				// ignored as they no longer require access to configmaps.
+				// It is especially important in watch-based manager, to avoid
+				// unnecessary watches for terminated pods waiting for GC.
+				pm.configMapManager.UnregisterPod(pod)
+			} else {
+				// TODO: Consider detecting only status update and in such case do
+				// not register pod, as it doesn't really matter.
+				pm.configMapManager.RegisterPod(pod)
+			}
 		}
 		podFullName := kubecontainer.GetPodFullName(pod)
 		// This logic relies on a static pod and its mirror to have the same name.
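For context, the helper added above classifies a pod as terminated only when its phase is Succeeded or Failed. Below is a minimal, self-contained sketch of how that check drives the register/unregister decision in updatePodsInternal; it is a standalone illustration, not part of the patch, and only the helper itself is taken from the diff.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// isPodInTerminatedState is the helper from the patch: a pod counts as
// terminated once its phase is Failed or Succeeded.
func isPodInTerminatedState(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
}

func main() {
	for _, phase := range []v1.PodPhase{v1.PodPending, v1.PodRunning, v1.PodSucceeded, v1.PodFailed} {
		pod := &v1.Pod{Status: v1.PodStatus{Phase: phase}}
		if isPodInTerminatedState(pod) {
			// Terminated pods are unregistered so no secret/configmap watches linger.
			fmt.Printf("%s: UnregisterPod\n", phase)
		} else {
			// Pods that are still pending or running keep their registrations.
			fmt.Printf("%s: RegisterPod\n", phase)
		}
	}
}

Pending and Running print RegisterPod, while Succeeded and Failed print UnregisterPod, mirroring the branch taken for both the secret and the configmap manager in the hunk above.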
@@ -24,7 +24,7 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -429,6 +429,41 @@ func TestCacheInvalidation(t *testing.T) {
 	fakeClient.ClearActions()
 }
 
+func TestRegisterIdempotence(t *testing.T) {
+	fakeClient := &fake.Clientset{}
+	fakeClock := clock.NewFakeClock(time.Now())
+	store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
+	manager := newCacheBasedSecretManager(store)
+
+	s1 := secretsToAttach{
+		imagePullSecretNames: []string{"s1"},
+	}
+
+	refs := func(ns, name string) int {
+		store.lock.Lock()
+		defer store.lock.Unlock()
+		item, ok := store.items[objectKey{ns, name}]
+		if !ok {
+			return 0
+		}
+		return item.refCount
+	}
+
+	manager.RegisterPod(podWithSecrets("ns1", "name1", s1))
+	assert.Equal(t, 1, refs("ns1", "s1"))
+	manager.RegisterPod(podWithSecrets("ns1", "name1", s1))
+	assert.Equal(t, 1, refs("ns1", "s1"))
+	manager.RegisterPod(podWithSecrets("ns1", "name2", s1))
+	assert.Equal(t, 2, refs("ns1", "s1"))
+
+	manager.UnregisterPod(podWithSecrets("ns1", "name1", s1))
+	assert.Equal(t, 1, refs("ns1", "s1"))
+	manager.UnregisterPod(podWithSecrets("ns1", "name1", s1))
+	assert.Equal(t, 1, refs("ns1", "s1"))
+	manager.UnregisterPod(podWithSecrets("ns1", "name2", s1))
+	assert.Equal(t, 0, refs("ns1", "s1"))
+}
+
 func TestCacheRefcounts(t *testing.T) {
 	fakeClient := &fake.Clientset{}
 	fakeClock := clock.NewFakeClock(time.Now())
@@ -32,10 +32,14 @@ type Manager interface {
 	// i.e. should not block on network operations.
 
 	// RegisterPod registers all objects referenced from a given pod.
+	//
+	// NOTE: All implementations of RegisterPod should be idempotent.
 	RegisterPod(pod *v1.Pod)
 
 	// UnregisterPod unregisters objects referenced from a given pod that are not
 	// used by any other registered pod.
+	//
+	// NOTE: All implementations of UnregisterPod should be idempotent.
 	UnregisterPod(pod *v1.Pod)
 }
 
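The idempotence notes above matter because, with the kubelet change earlier in this commit, UnregisterPod may be called every time a terminated pod's status is updated. Below is a minimal sketch of a manager that satisfies that contract, assuming a per-pod-UID registration map in front of a plain reference count; the names are illustrative and only imagePullSecrets are tracked, so this is not the kubelet's cache-based or watch-based implementation.

package main

import (
	"fmt"
	"sync"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// refManager is an illustrative implementation of the Register/Unregister
// contract: it records one reference per pod UID, so calling RegisterPod or
// UnregisterPod twice for the same pod leaves the counts unchanged.
type refManager struct {
	mu         sync.Mutex
	registered map[string][]string // pod UID -> referenced secret names
	refCount   map[string]int      // secret name -> number of registered pods
}

func newRefManager() *refManager {
	return &refManager{registered: map[string][]string{}, refCount: map[string]int{}}
}

// secretNames extracts only imagePullSecrets for brevity; a real manager
// would also walk volumes and container env sources.
func secretNames(pod *v1.Pod) []string {
	var names []string
	for _, s := range pod.Spec.ImagePullSecrets {
		names = append(names, s.Name)
	}
	return names
}

func (m *refManager) RegisterPod(pod *v1.Pod) {
	m.mu.Lock()
	defer m.mu.Unlock()
	uid := string(pod.UID)
	if _, ok := m.registered[uid]; ok {
		return // already registered: second call is a no-op
	}
	names := secretNames(pod)
	m.registered[uid] = names
	for _, n := range names {
		m.refCount[n]++
	}
}

func (m *refManager) UnregisterPod(pod *v1.Pod) {
	m.mu.Lock()
	defer m.mu.Unlock()
	uid := string(pod.UID)
	for _, n := range m.registered[uid] {
		m.refCount[n]--
	}
	delete(m.registered, uid) // second call finds nothing: no-op
}

func main() {
	m := newRefManager()
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "name1", Namespace: "ns1", UID: "uid-1"},
		Spec:       v1.PodSpec{ImagePullSecrets: []v1.LocalObjectReference{{Name: "s1"}}},
	}
	m.RegisterPod(pod)
	m.RegisterPod(pod) // idempotent
	fmt.Println(m.refCount["s1"]) // 1
	m.UnregisterPod(pod)
	m.UnregisterPod(pod) // idempotent
	fmt.Println(m.refCount["s1"]) // 0
}

The repeated RegisterPod and UnregisterPod calls in main are no-ops, which is the same invariant TestRegisterIdempotence asserts against the real cache-based secret manager.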