mirror of https://github.com/k3s-io/k3s
Fix Stackdriver Logging soak tests issues
parent c75d3028dd
commit 0210c3dd77
@@ -84,8 +84,9 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
 		for runIdx := 0; runIdx < podRunCount; runIdx++ {
 			// Starting one pod on each node.
 			for _, pod := range podsByRun[runIdx] {
-				err := pod.Start(f)
-				framework.Logf("Failed to start pod: %v", err)
+				if err := pod.Start(f); err != nil {
+					framework.Logf("Failed to start pod: %v", err)
+				}
 			}
 			<-t.C
 		}
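This hunk scopes the error to the if statement, so the soak test logs "Failed to start pod" only when pod.Start actually fails; before the fix the message was emitted on every iteration, even with a nil error. A minimal, self-contained sketch of the scoped-error pattern (startPod and the pod names are hypothetical stand-ins for pod.Start(f)):

package main

import (
	"errors"
	"fmt"
)

// startPod is a hypothetical stand-in for pod.Start(f); it fails for
// one pod so both branches of the pattern are exercised.
func startPod(name string) error {
	if name == "pod-b" {
		return errors.New("node not ready")
	}
	return nil
}

func main() {
	for _, name := range []string{"pod-a", "pod-b", "pod-c"} {
		// Scoping err to the if statement mirrors the fix above:
		// the failure is logged only when startPod returns an error.
		if err := startPod(name); err != nil {
			fmt.Printf("Failed to start pod %s: %v\n", name, err)
		}
	}
}

Keeping err local to the if statement also prevents it from leaking into the surrounding scope, which is what allowed the old unconditional log line to pass review in the first place.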
@@ -65,7 +65,12 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max
 
 	maxRestartCount := 0
 	for _, pod := range agentPods.Items {
-		restartCount := int(pod.Status.ContainerStatuses[0].RestartCount)
+		contStatuses := pod.Status.ContainerStatuses
+		if len(contStatuses) == 0 {
+			framework.Logf("There are no container statuses for pod %s", pod.Name)
+			continue
+		}
+		restartCount := int(contStatuses[0].RestartCount)
 		maxRestartCount = integer.IntMax(maxRestartCount, restartCount)
 
 		framework.Logf("Logging agent %s on node %s was restarted %d times",
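This hunk guards against pods whose ContainerStatuses list is still empty (a pod that has been scheduled but has not started any container yet reports no statuses), which previously caused an index-out-of-range panic on ContainerStatuses[0]. A self-contained sketch of the guard, using pared-down stand-in types rather than the real Kubernetes API objects:

package main

import "fmt"

// containerStatus is a pared-down stand-in for the Kubernetes
// ContainerStatus type; only the field used here is modeled.
type containerStatus struct {
	RestartCount int32
}

type pod struct {
	Name     string
	Statuses []containerStatus
}

// maxRestartCount mirrors the guarded loop above: pods whose status
// list has not been populated yet are skipped instead of panicking
// on Statuses[0].
func maxRestartCount(pods []pod) int {
	maxCount := 0
	for _, p := range pods {
		if len(p.Statuses) == 0 {
			fmt.Printf("There are no container statuses for pod %s\n", p.Name)
			continue
		}
		if rc := int(p.Statuses[0].RestartCount); rc > maxCount {
			maxCount = rc
		}
	}
	return maxCount
}

func main() {
	pods := []pod{
		{Name: "agent-1", Statuses: []containerStatus{{RestartCount: 2}}},
		{Name: "agent-2"}, // just scheduled; no statuses yet
	}
	fmt.Println("max restarts:", maxRestartCount(pods))
}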
@@ -150,9 +150,6 @@ func getFullIngestionPred(podsMap map[string]FiniteLoggingPod) NumberedIngestion
 	return func(name string, occ map[int]bool) (bool, error) {
 		p := podsMap[name]
 		ok := len(occ) == p.ExpectedLineCount()
-		if !ok {
-			framework.Logf("Pod %s is still missing %d lines", name, p.ExpectedLineCount()-len(occ))
-		}
 		return ok, nil
 	}
 }
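This hunk removes the per-pod "still missing" message from the ingestion predicate, presumably because the predicate is polled repeatedly for every pod and the message flooded the test log; per-pod reporting moves to the aggregated timeout handler in the next hunk. A sketch of the now-silent predicate shape (ingestionPred is a hypothetical simplification of the real closure, which also takes the pod name and returns an error):

package main

import "fmt"

// ingestionPred reports whether all expected log lines for a pod have
// been observed. It stays silent on misses, since it is polled often
// and the timeout handler now owns the reporting.
func ingestionPred(expected int) func(occ map[int]bool) bool {
	return func(occ map[int]bool) bool {
		return len(occ) == expected
	}
}

func main() {
	pred := ingestionPred(3)
	occ := map[int]bool{0: true, 1: true}
	fmt.Println(pred(occ)) // false: one line still missing
	occ[2] = true
	fmt.Println(pred(occ)) // true: all 3 lines ingested
}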
@@ -160,24 +157,27 @@ func getFullIngestionPred(podsMap map[string]FiniteLoggingPod) NumberedIngestion
 func getFullIngestionTimeout(podsMap map[string]FiniteLoggingPod, slack float64) NumberedTimeoutFun {
 	return func(names []string, occs map[string]map[int]bool) error {
 		totalGot, totalWant := 0, 0
-		podsWithLosses := []string{}
+		lossMsgs := []string{}
 		for _, name := range names {
 			got := len(occs[name])
 			want := podsMap[name].ExpectedLineCount()
 			if got != want {
-				podsWithLosses = append(podsWithLosses, name)
+				lossMsg := fmt.Sprintf("%s: %d lines", name, want-got)
+				lossMsgs = append(lossMsgs, lossMsg)
 			}
 			totalGot += got
 			totalWant += want
 		}
-		if len(podsWithLosses) > 0 {
-			framework.Logf("Still missing logs from: %s", strings.Join(podsWithLosses, ", "))
+		if len(lossMsgs) > 0 {
+			framework.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n"))
 		}
 		lostFrac := 1 - float64(totalGot)/float64(totalWant)
 		if lostFrac > slack {
 			return fmt.Errorf("still missing %.2f%% of logs, only %.2f%% is tolerable",
 				lostFrac*100, slack*100)
 		}
+		framework.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%",
+			lostFrac*100, slack*100)
 		return nil
 	}
 }
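This hunk upgrades the timeout report from a flat list of pod names to per-pod missing-line counts, and logs the final loss fraction even when it falls within the tolerated slack, so a passing soak run still records how lossy it was. The accounting is: lostFrac = 1 - totalGot/totalWant, failing only when lostFrac exceeds slack. A self-contained sketch of the same accounting (checkIngestion, the pod names, and the counts are made up for illustration):

package main

import (
	"fmt"
	"strings"
)

// checkIngestion follows the accounting in getFullIngestionTimeout:
// it sums got/want across pods, reports per-pod losses, and fails
// only when the overall lost fraction exceeds the slack.
func checkIngestion(names []string, got, want map[string]int, slack float64) error {
	totalGot, totalWant := 0, 0
	lossMsgs := []string{}
	for _, name := range names {
		g, w := got[name], want[name]
		if g != w {
			lossMsgs = append(lossMsgs, fmt.Sprintf("%s: %d lines", name, w-g))
		}
		totalGot += g
		totalWant += w
	}
	if len(lossMsgs) > 0 {
		fmt.Printf("Still missing logs from:\n%s\n", strings.Join(lossMsgs, "\n"))
	}
	lostFrac := 1 - float64(totalGot)/float64(totalWant)
	if lostFrac > slack {
		return fmt.Errorf("still missing %.2f%% of logs, only %.2f%% is tolerable",
			lostFrac*100, slack*100)
	}
	fmt.Printf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%\n",
		lostFrac*100, slack*100)
	return nil
}

func main() {
	names := []string{"pod-a", "pod-b"}
	got := map[string]int{"pod-a": 100, "pod-b": 98}
	want := map[string]int{"pod-a": 100, "pod-b": 100}
	// 2 of 200 lines lost = 1%, within the 5% slack, so no error.
	if err := checkIngestion(names, got, want, 0.05); err != nil {
		fmt.Println(err)
	}
}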