mirror of https://github.com/k3s-io/k3s
commit 714fcd910f

@@ -619,13 +619,9 @@ func (gc *GarbageCollector) attemptToOrphanWorker() bool {
 // GraphHasUID returns if the GraphBuilder has a particular UID store in its
 // uidToNode graph. It's useful for debugging.
 // This method is used by integration tests.
-func (gc *GarbageCollector) GraphHasUID(UIDs []types.UID) bool {
-	for _, u := range UIDs {
-		if _, ok := gc.dependencyGraphBuilder.uidToNode.Read(u); ok {
-			return true
-		}
-	}
-	return false
+func (gc *GarbageCollector) GraphHasUID(u types.UID) bool {
+	_, ok := gc.dependencyGraphBuilder.uidToNode.Read(u)
+	return ok
 }
 
 // GetDeletableResources returns all resources from discoveryClient that the

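Note on semantics: the old slice-based GraphHasUID returned true as soon as any one of the passed UIDs was found in the uidToNode graph, whereas the updated tests below want either "all of these UIDs are present" (before orphaning) or "none of them are" (after cascading deletion). With the single-UID signature those policies move to the callers. A minimal caller-side sketch, assuming the exported method above and the types / garbagecollector imports these tests already use; the helper name graphHasAllUIDs is illustrative only, not part of this change:

// graphHasAllUIDs reports whether every UID in uids is currently tracked
// by the GarbageCollector's dependency graph. It mirrors the loops the
// updated integration tests run inside wait.Poll.
func graphHasAllUIDs(gc *garbagecollector.GarbageCollector, uids []types.UID) bool {
	for _, u := range uids {
		if !gc.GraphHasUID(u) {
			return false
		}
	}
	return true
}

TestStressingCascadingDeletion needs the opposite check; with the old any-match semantics, gc.GraphHasUID(uids) returning false already meant "none present", so looping per UID there is a straight equivalent rather than a behavior change.
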
@@ -373,7 +373,7 @@ func TestCascadingDeletion(t *testing.T) {
 	// the gc, so wait for the garbage collector to observe the deletion of
 	// the toBeDeletedRC
 	if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
-		return !gc.GraphHasUID([]types.UID{toBeDeletedRC.ObjectMeta.UID}), nil
+		return !gc.GraphHasUID(toBeDeletedRC.ObjectMeta.UID), nil
 	}); err != nil {
 		t.Fatal(err)
 	}

@@ -440,18 +440,39 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
 	for j := 0; j < 3; j++ {
 		podName := "test.pod." + nameSuffix + "-" + strconv.Itoa(j)
 		pod := newPod(podName, namespace, []metav1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}})
-		_, err = podClient.Create(pod)
+		createdPod, err := podClient.Create(pod)
 		if err != nil {
 			t.Fatalf("Failed to create Pod: %v", err)
 		}
-		podUIDs = append(podUIDs, pod.ObjectMeta.UID)
+		podUIDs = append(podUIDs, createdPod.ObjectMeta.UID)
+	}
+	orphan := false
+	switch {
+	case options == nil:
+		// if there are no deletion options, the default policy for replication controllers is orphan
+		orphan = true
+	case options.OrphanDependents != nil:
+		// if the deletion options explicitly specify whether to orphan, that controls
+		orphan = *options.OrphanDependents
+	case len(initialFinalizers) != 0 && initialFinalizers[0] == metav1.FinalizerOrphanDependents:
+		// if the orphan finalizer is explicitly added, we orphan
+		orphan = true
 	}
-	orphan := (options != nil && options.OrphanDependents != nil && *options.OrphanDependents) || (options == nil && len(initialFinalizers) != 0 && initialFinalizers[0] == metav1.FinalizerOrphanDependents)
 	// if we intend to orphan the pods, we need wait for the gc to observe the
 	// creation of the pods, otherwise if the deletion of RC is observed before
 	// the creation of the pods, the pods will not be orphaned.
 	if orphan {
-		wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })
+		err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			for _, u := range podUIDs {
+				if !gc.GraphHasUID(u) {
+					return false, nil
+				}
+			}
+			return true, nil
+		})
+		if err != nil {
+			t.Fatalf("failed to observe the expected pods in the GC graph for rc %s", rcName)
+		}
 	}
 	// delete the rc
 	if err := rcClient.Delete(rc.ObjectMeta.Name, options); err != nil {

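The switch also shifts the default for the options == nil case: the old one-line boolean only orphaned without options when the orphan finalizer was pre-set, while the new code treats missing deletion options as "use the replication controller's default policy", which is to orphan. A standalone rendering of the new decision, as a sketch only; the wouldOrphan name is hypothetical and the parameter types are assumed to match the surrounding test helper (options *metav1.DeleteOptions, initialFinalizers []string):

// wouldOrphan restates the switch above as a predicate.
func wouldOrphan(options *metav1.DeleteOptions, initialFinalizers []string) bool {
	switch {
	case options == nil:
		// no deletion options: the default policy for replication controllers is orphan
		return true
	case options.OrphanDependents != nil:
		// an explicit OrphanDependents field controls the decision
		return *options.OrphanDependents
	case len(initialFinalizers) != 0 && initialFinalizers[0] == metav1.FinalizerOrphanDependents:
		// the orphan finalizer was added up front, so orphan
		return true
	default:
		return false
	}
}
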
@@ -534,13 +555,11 @@ func TestStressingCascadingDeletion(t *testing.T) {
 	}
 
 	// verify there is no node representing replication controllers in the gc's graph
-	uids := make([]types.UID, 0, collections)
 	for i := 0; i < collections; i++ {
 		uid := <-rcUIDs
-		uids = append(uids, uid)
-	}
-	if gc.GraphHasUID(uids) {
-		t.Errorf("Expect all nodes representing replication controllers are removed from the Propagator's graph")
+		if gc.GraphHasUID(uid) {
+			t.Errorf("Expect all nodes representing replication controllers are removed from the Propagator's graph")
+		}
 	}
 }
 

@@ -568,17 +587,27 @@ func TestOrphaning(t *testing.T) {
 	for i := 0; i < podsNum; i++ {
 		podName := garbageCollectedPodName + strconv.Itoa(i)
 		pod := newPod(podName, ns.Name, []metav1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
-		_, err = podClient.Create(pod)
+		createdPod, err := podClient.Create(pod)
 		if err != nil {
 			t.Fatalf("Failed to create Pod: %v", err)
 		}
-		podUIDs = append(podUIDs, pod.ObjectMeta.UID)
+		podUIDs = append(podUIDs, createdPod.ObjectMeta.UID)
 	}
 
 	// we need wait for the gc to observe the creation of the pods, otherwise if
 	// the deletion of RC is observed before the creation of the pods, the pods
 	// will not be orphaned.
-	wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })
+	err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+		for _, u := range podUIDs {
+			if !gc.GraphHasUID(u) {
+				return false, nil
+			}
+		}
+		return true, nil
+	})
+	if err != nil {
+		t.Fatalf("Failed to observe pods in GC graph for %s: %v", toBeDeletedRC.Name, err)
+	}
 
 	err = rcClient.Delete(toBeDeletedRCName, getOrphanOptions())
 	if err != nil {

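Both this hunk and the setupRCsPods one also stop discarding the value returned by wait.Poll. For reference, a minimal sketch of the contract the new err checks rely on, assuming k8s.io/apimachinery/pkg/util/wait from this era; the condition shown is illustrative, not the test's exact one. Poll returns nil once the condition reports done, the condition's own error if it returns one, and wait.ErrWaitTimeout if the timeout elapses first, so a GC graph that never catches up now fails the test instead of letting it race ahead to the Delete call:

// sketch: surface a poll timeout as a test failure instead of ignoring it
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
	// done once the GC has observed the RC; a non-nil error here aborts the poll early
	return gc.GraphHasUID(toBeDeletedRC.ObjectMeta.UID), nil
}); err != nil {
	// err is wait.ErrWaitTimeout when the 60s budget is exhausted
	t.Fatalf("GC graph never observed the RC: %v", err)
}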