Merge pull request #52355 from davidz627/e2e_nil
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

E2E test to make sure the controller does not crash because of a nil volume spec.

Fixes #49521. Tests the fix of the issue referenced in #49418.
commit 7e7bcabe17
@@ -3917,6 +3917,33 @@ func WaitForControllerManagerUp() error {
 	return fmt.Errorf("waiting for controller-manager timed out")
 }
+
+// CheckForControllerManagerHealthy checks that the controller manager does not crash within "duration"
+func CheckForControllerManagerHealthy(duration time.Duration) error {
+	var PID string
+	cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1"
+	for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
+		result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
+		if err != nil {
+			// We don't necessarily know that it crashed, the pipe could just be broken
+			LogSSHResult(result)
+			return fmt.Errorf("master unreachable after %v", time.Since(start))
+		} else if result.Code != 0 {
+			LogSSHResult(result)
+			return fmt.Errorf("SSH result code not 0. actually: %v after %v", result.Code, time.Since(start))
+		} else if result.Stdout != PID {
+			if PID == "" {
+				PID = result.Stdout
+			} else {
+				// The PID changed, so the controller manager crashed and restarted
+				return fmt.Errorf("controller manager crashed, old PID: %s, new PID: %s", PID, result.Stdout)
+			}
+		} else {
+			Logf("kube-controller-manager still healthy after %v", time.Since(start))
+		}
+	}
+	return nil
+}
 
 // Returns number of ready Nodes excluding Master Node.
 func NumberOfReadyNodes(c clientset.Interface) (int, error) {
 	nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
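For orientation, a minimal sketch of how a test can drive this new helper, using only framework functions that appear elsewhere in this PR (RestartControllerManager, WaitForControllerManagerUp, Failf); illustrative only, not part of the diff:

	// Restart the controller manager, wait for it to come back up,
	// then assert that it stays up (same PID) for two minutes.
	if err := framework.RestartControllerManager(); err != nil {
		framework.Failf("failed to restart kube-controller-manager: %v", err)
	}
	if err := framework.WaitForControllerManagerUp(); err != nil {
		framework.Failf("kube-controller-manager did not come back up: %v", err)
	}
	if err := framework.CheckForControllerManagerHealthy(2 * time.Minute); err != nil {
		framework.Failf("kube-controller-manager crashed or master unreachable: %v", err)
	}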
@@ -492,3 +492,15 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
 	err = WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
 	Expect(err).NotTo(HaveOccurred())
 }
+
+func CreateGCEVolume() (*v1.PersistentVolumeSource, string) {
+	diskName, err := CreatePDWithRetry()
+	ExpectNoError(err)
+	return &v1.PersistentVolumeSource{
+		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
+			PDName:   diskName,
+			FSType:   "ext3",
+			ReadOnly: false,
+		},
+	}, diskName
+}
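A minimal usage sketch for the new helper, paired with DeletePDWithRetry for cleanup the same way the disruptive test below pairs them; the surrounding Ginkgo test body is assumed:

	// Create a PD-backed PersistentVolumeSource and make sure the
	// underlying disk is deleted even if the test fails later.
	pvSource, diskName := framework.CreateGCEVolume()
	defer func() {
		framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
	}()
	pvConfig := framework.PersistentVolumeConfig{
		NamePrefix: "gce-",
		PVSource:   *pvSource,
	}

pvConfig then feeds framework.CreatePVPVC, as in the BeforeEach below.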
@@ -27,6 +27,7 @@ import (
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 )
@@ -108,6 +109,96 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
 		framework.DeletePodWithWait(f, c, nfsServerPod)
 	})
 
+	Context("when kube-controller-manager restarts", func() {
+		var (
+			diskName1, diskName2 string
+			err                  error
+			pvConfig1, pvConfig2 framework.PersistentVolumeConfig
+			pv1, pv2             *v1.PersistentVolume
+			pvSource1, pvSource2 *v1.PersistentVolumeSource
+			pvc1, pvc2           *v1.PersistentVolumeClaim
+			clientPod            *v1.Pod
+		)
+
+		BeforeEach(func() {
+			framework.SkipUnlessProviderIs("gce")
+			framework.SkipUnlessSSHKeyPresent()
+
+			By("Initializing first PD with PVPVC binding")
+			pvSource1, diskName1 = framework.CreateGCEVolume()
+			Expect(err).NotTo(HaveOccurred())
+			pvConfig1 = framework.PersistentVolumeConfig{
+				NamePrefix: "gce-",
+				Labels:     volLabel,
+				PVSource:   *pvSource1,
+				Prebind:    nil,
+			}
+			pv1, pvc1, err = framework.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false)
+			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv1, pvc1))
+
+			By("Initializing second PD with PVPVC binding")
+			pvSource2, diskName2 = framework.CreateGCEVolume()
+			Expect(err).NotTo(HaveOccurred())
+			pvConfig2 = framework.PersistentVolumeConfig{
+				NamePrefix: "gce-",
+				Labels:     volLabel,
+				PVSource:   *pvSource2,
+				Prebind:    nil,
+			}
+			pv2, pvc2, err = framework.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false)
+			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2))
+
+			By("Attaching both PVCs to a single pod")
+			clientPod, err = framework.CreatePod(c, ns, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			// Delete client/user pod first
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod))
+
+			// Delete PV and PVCs
+			if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 {
+				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+			}
+			pv1, pvc1 = nil, nil
+			if errs := framework.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 {
+				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+			}
+			pv2, pvc2 = nil, nil
+
+			// Delete the actual disks
+			if diskName1 != "" {
+				framework.ExpectNoError(framework.DeletePDWithRetry(diskName1))
+			}
+			if diskName2 != "" {
+				framework.ExpectNoError(framework.DeletePDWithRetry(diskName2))
+			}
+		})
+
+		It("should delete a bound PVC from a clientPod, restart the kube-controller-manager, and ensure the kube-controller-manager does not crash", func() {
+			By("Deleting PVC for volume 2")
+			err = framework.DeletePersistentVolumeClaim(c, pvc2.Name, ns)
+			Expect(err).NotTo(HaveOccurred())
+			pvc2 = nil
+
+			By("Restarting the kube-controller-manager")
+			err = framework.RestartControllerManager()
+			Expect(err).NotTo(HaveOccurred())
+			err = framework.WaitForControllerManagerUp()
+			Expect(err).NotTo(HaveOccurred())
+			framework.Logf("kube-controller-manager restarted")
+
+			By("Observing the kube-controller-manager healthy for at least 2 minutes")
+			// Continue checking for 2 minutes to make sure the kube-controller-manager is healthy
+			err = framework.CheckForControllerManagerHealthy(2 * time.Minute)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+	})
+
 	Context("when kubelet restarts", func() {
 		var (
 			clientPod *v1.Pod
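Worth noting, though the diff does not spell it out: the test binds two PVs, attaches both claims to one pod, and deletes only the second PVC before the restart. The restarted controller manager therefore has to rebuild its attach/detach state for a pod that still references a claim that no longer exists, which appears to be the nil volume spec path from #49521 that this PR guards against.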
@@ -42,17 +42,6 @@ const (
 	pvReadCmd string = "cat " + pvTestFile
 )
 
-func (t *PersistentVolumeUpgradeTest) createGCEVolume() *v1.PersistentVolumeSource {
-	diskName, err := framework.CreatePDWithRetry()
-	framework.ExpectNoError(err)
-	return &v1.PersistentVolumeSource{
-		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
-			PDName:   diskName,
-			FSType:   "ext3",
-			ReadOnly: false,
-		},
-	}
-}
 func (t *PersistentVolumeUpgradeTest) deleteGCEVolume(pvSource *v1.PersistentVolumeSource) error {
 	return framework.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
 }
@@ -67,7 +56,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
 	ns := f.Namespace.Name
 
 	By("Initializing PV source")
-	t.pvSource = t.createGCEVolume()
+	t.pvSource, _ = framework.CreateGCEVolume()
 	pvConfig := framework.PersistentVolumeConfig{
 		NamePrefix: "pv-upgrade",
 		PVSource:   *t.pvSource,
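With the shared framework.CreateGCEVolume helper added above, the upgrade test's private createGCEVolume duplicate is removed. The returned disk name can be discarded at this call site because deleteGCEVolume recovers it from pvSource.GCEPersistentDisk.PDName.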