mirror of https://github.com/k3s-io/k3s
Merge pull request #62853 from tony612/fix-resultRun-reset
Automatic merge from submit-queue (batch tested with PRs 62655, 61711, 59122, 62853, 62390). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

reset resultRun to 0 on pod restart

**What this PR does / why we need it**: resultRun should be reset to 0 on pod restart, so that resultRun on the first failure of the new container will be 1, which is correct. Otherwise, the effective FailureThreshold after a restart is `FailureThreshold - 1`.

**Which issue(s) this PR fixes**: This PR is related to https://github.com/kubernetes/kubernetes/issues/53530. https://github.com/kubernetes/kubernetes/pull/46371 fixed that issue, but the small off-by-one described above remained.

**Special notes for your reviewer**:

**Release note**:
```release-note
fix resultRun by resetting it to 0 on pod restart
```
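To make the off-by-one concrete, here is a minimal, self-contained Go sketch (hypothetical names, not kubelet code) that counts how many failed probes a freshly restarted container is allowed before the failure threshold trips, depending on the value the consecutive-failure counter is seeded with right after the restart:

```go
package main

import "fmt"

// failuresUntilRestart returns how many consecutive failed probes a freshly
// restarted container survives before the failure threshold is crossed,
// given the value resultRun is seeded with right after the previous restart.
func failuresUntilRestart(seed, failureThreshold int) int {
	resultRun := seed
	failures := 0
	for {
		failures++  // one more failed probe against the new container
		resultRun++ // the consecutive-failure run keeps growing
		if resultRun >= failureThreshold {
			return failures
		}
	}
}

func main() {
	const threshold = 3
	fmt.Println("seeded with 1:", failuresUntilRestart(1, threshold)) // only 2 failures tolerated
	fmt.Println("seeded with 0:", failuresUntilRestart(0, threshold)) // the full 3 failures
}
```

With a threshold of 3, seeding the counter at 1 trips the threshold after only 2 failures of the new container, while seeding it at 0 honors the full 3, which is the behavior this PR restores.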
commit f68d10cfe4
pkg/kubelet/prober/worker.go

@@ -240,7 +240,7 @@ func (w *worker) doProbe() (keepGoing bool) {
         // chance of hitting #21751, where running `docker exec` when a
         // container is being stopped may lead to corrupted container state.
         w.onHold = true
-        w.resultRun = 1
+        w.resultRun = 0
     }

     return true
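For context, the changed assignment sits at the end of doProbe's liveness-failure branch. Below is a hedged sketch of the counting-and-threshold logic around it, reconstructed from the diff context with simplified stand-in types; it is not the verbatim kubelet source, and the restart bookkeeping is only a stand-in.

```go
// Hedged sketch, not the verbatim kubelet source: a simplified model of the
// consecutive-result counting around the changed line in doProbe. Identifier
// names (lastResult, resultRun, failureThreshold, onHold) mirror the diff;
// the types here are stand-ins.
package main

import "fmt"

type result int

const (
	success result = iota
	failure
)

type worker struct {
	lastResult       result
	resultRun        int
	failureThreshold int
	onHold           bool
	restarts         int // stand-in: the real kubelet restarts the container elsewhere
}

// observe folds one probe outcome into the consecutive-run counter and, once
// the failure threshold is reached, marks the container for restart and
// resets the counter the way the patched code does.
func (w *worker) observe(r result) {
	if w.lastResult == r {
		w.resultRun++
	} else {
		w.lastResult = r
		w.resultRun = 1
	}
	if r == failure && w.resultRun < w.failureThreshold {
		return // still below FailureThreshold: report nothing yet
	}
	if r == failure {
		w.restarts++    // the container will be restarted
		w.onHold = true // stop probing until a new container ID appears
		w.resultRun = 0 // the fix: 0, not 1, so the next container gets the full threshold
	}
}

func main() {
	w := &worker{failureThreshold: 3}
	for i := 0; i < 3; i++ {
		w.observe(failure)
	}
	fmt.Println("restarts:", w.restarts, "resultRun after restart:", w.resultRun)
	// restarts: 1 resultRun after restart: 0
}
```

Because lastResult is still Failure when the restarted container is first probed, that first failure only increments the run; seeding resultRun at 0 rather than 1 is what gives the new container the full FailureThreshold, as the PR description explains.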
pkg/kubelet/prober/worker_test.go

@@ -352,7 +352,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
     expectContinue(t, w, w.doProbe(), msg)
     expectResult(t, w, results.Success, msg)
     if w.resultRun != 1 {
-        t.Errorf("Prober resultRun should 1")
+        t.Errorf("Prober resultRun should be 1")
     }

     m.prober.exec = fakeExecProber{probe.Failure, nil}
@@ -360,7 +360,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
     expectContinue(t, w, w.doProbe(), msg)
     expectResult(t, w, results.Success, msg)
     if w.resultRun != 1 {
-        t.Errorf("Prober resultRun should 1")
+        t.Errorf("Prober resultRun should be 1")
     }

     m.prober.exec = fakeExecProber{probe.Failure, nil}
@@ -372,13 +372,13 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) {
     }

     // Exceeding FailureThreshold should cause resultRun to
-    // reset to 1 so that the probe on the restarted pod
+    // reset to 0 so that the probe on the restarted pod
     // also gets FailureThreshold attempts to succeed.
     m.prober.exec = fakeExecProber{probe.Failure, nil}
     msg = "3rd probe failure, result failure"
     expectContinue(t, w, w.doProbe(), msg)
     expectResult(t, w, results.Failure, msg)
-    if w.resultRun != 1 {
-        t.Errorf("Prober resultRun should be reset to 1")
+    if w.resultRun != 0 {
+        t.Errorf("Prober resultRun should be reset to 0")
     }
 }