mirror of https://github.com/k3s-io/k3s
Remove unnecessary daemonset addition/deletion
Signed-off-by: Derek Nola <derek.nola@suse.com>

Branch: pull/7742/head
Parent: d273f7efce
Commit: 2105147ce7
@@ -214,15 +214,6 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
 	return serverNodeNames, agentNodeNames, nil
 }
 
-// Deletes the content of a manifest file previously applied
-func DeleteWorkload(workload, kubeconfig string) error {
-	cmd := "kubectl delete -f " + workload + " --kubeconfig=" + kubeconfig
-	if _, err := RunCommand(cmd); err != nil {
-		return err
-	}
-	return nil
-}
-
 func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error) {
 	resourceDir := "../amd64_resource_files"
 	if hardened {
@@ -274,25 +274,6 @@ var _ = Describe("Verify Create", Ordered, func() {
 	})
 
 	Context("Validate restart", func() {
-		It("Deletes daemonset", func() {
-			_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened)
-			Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
-			defer e2e.DeleteWorkload("daemonset.yaml", kubeConfigFile)
-			nodes, _ := e2e.ParseNodes(kubeConfigFile, false)
-
-			Eventually(func(g Gomega) {
-				pods, _ := e2e.ParsePods(kubeConfigFile, false)
-				count := e2e.CountOfStringInSlice("test-daemonset", pods)
-				g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
-				podsRunning := 0
-				for _, pod := range pods {
-					if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
-						podsRunning++
-					}
-				}
-				g.Expect(len(nodes)).Should((Equal(podsRunning)), "Daemonset running pods count does not match node count")
-			}, "620s", "5s").Should(Succeed())
-		})
 		It("Restarts normally", func() {
 			errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
 			Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly")
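For reference, the DeleteWorkload helper removed in the first hunk only wrapped a "kubectl delete -f" call. Below is a minimal standalone sketch of the same behavior, assuming kubectl is on PATH; it is an illustration only, not part of this commit. The real helper went through the e2e package's RunCommand wrapper, and the manifest and kubeconfig arguments in main are placeholders.

package main

import (
	"fmt"
	"log"
	"os/exec"
)

// deleteWorkload mirrors what the removed e2e.DeleteWorkload helper did:
// shell out to kubectl and delete whatever a previously applied manifest created.
func deleteWorkload(workload, kubeconfig string) error {
	// Equivalent of: kubectl delete -f <workload> --kubeconfig=<kubeconfig>
	out, err := exec.Command("kubectl", "delete", "-f", workload, "--kubeconfig="+kubeconfig).CombinedOutput()
	if err != nil {
		return fmt.Errorf("kubectl delete -f %s failed: %w: %s", workload, err, out)
	}
	return nil
}

func main() {
	// Placeholder arguments; in the e2e suite the kubeconfig path came from the test harness.
	if err := deleteWorkload("daemonset.yaml", "/etc/rancher/k3s/k3s.yaml"); err != nil {
		log.Fatal(err)
	}
}

In the test deleted by the second hunk, the helper was invoked as defer e2e.DeleteWorkload("daemonset.yaml", kubeConfigFile) so the daemonset was cleaned up after the pod-count check.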