mirror of https://github.com/k3s-io/k3s

Make predicate errors more human readable

parent 0726f8c726
commit c389e3cec7
@@ -30,31 +30,31 @@ var (
 	// be made to pass by removing pods, or you change an existing predicate so that
 	// it can never be made to pass by removing pods, you need to add the predicate
 	// failure error in nodesWherePreemptionMightHelp() in scheduler/core/generic_scheduler.go
-	ErrDiskConflict = newPredicateFailureError("NoDiskConflict")
-	ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict")
-	ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector")
-	ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity")
-	ErrPodAffinityRulesNotMatch = newPredicateFailureError("PodAffinityRulesNotMatch")
-	ErrPodAntiAffinityRulesNotMatch = newPredicateFailureError("PodAntiAffinityRulesNotMatch")
-	ErrExistingPodsAntiAffinityRulesNotMatch = newPredicateFailureError("ExistingPodsAntiAffinityRulesNotMatch")
-	ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints")
-	ErrPodNotMatchHostName = newPredicateFailureError("HostName")
-	ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts")
-	ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence")
-	ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity")
-	ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount")
-	ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure")
-	ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure")
-	ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk")
-	ErrNodeNotReady = newPredicateFailureError("NodeNotReady")
-	ErrNodeNetworkUnavailable = newPredicateFailureError("NodeNetworkUnavailable")
-	ErrNodeUnschedulable = newPredicateFailureError("NodeUnschedulable")
-	ErrNodeUnknownCondition = newPredicateFailureError("NodeUnknownCondition")
-	ErrVolumeNodeConflict = newPredicateFailureError("VolumeNodeAffinityConflict")
-	ErrVolumeBindConflict = newPredicateFailureError("VolumeBindingNoMatch")
+	ErrDiskConflict = newPredicateFailureError("NoDiskConflict", "node(s) had no available disk")
+	ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict", "node(s) had no available volume zone")
+	ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector", "node(s) didn't match node selector")
+	ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity", "node(s) didn't match pod affinity/anti-affinity")
+	ErrPodAffinityRulesNotMatch = newPredicateFailureError("PodAffinityRulesNotMatch", "node(s) didn't match pod affinity rules")
+	ErrPodAntiAffinityRulesNotMatch = newPredicateFailureError("PodAntiAffinityRulesNotMatch", "node(s) didn't match pod anti-affinity rules")
+	ErrExistingPodsAntiAffinityRulesNotMatch = newPredicateFailureError("ExistingPodsAntiAffinityRulesNotMatch", "node(s) didn't satisfy existing pods anti-affinity rules")
+	ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints", "node(s) had taints that the pod didn't tolerate")
+	ErrPodNotMatchHostName = newPredicateFailureError("HostName", "node(s) didn't match the requested hostname")
+	ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts", "node(s) didn't have free ports for the requested pod ports")
+	ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence", "node(s) didn't have the requested labels")
+	ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity", "node(s) didn't match service affinity")
+	ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount", "node(s) exceed max volume count")
+	ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure", "node(s) had memory pressure")
+	ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure", "node(s) had disk pressure")
+	ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk", "node(s) were out of disk space")
+	ErrNodeNotReady = newPredicateFailureError("NodeNotReady", "node(s) were not ready")
+	ErrNodeNetworkUnavailable = newPredicateFailureError("NodeNetworkUnavailable", "node(s) had unavailable network")
+	ErrNodeUnschedulable = newPredicateFailureError("NodeUnschedulable", "node(s) were unschedulable")
+	ErrNodeUnknownCondition = newPredicateFailureError("NodeUnknownCondition", "node(s) had unknown conditions")
+	ErrVolumeNodeConflict = newPredicateFailureError("VolumeNodeAffinityConflict", "node(s) had volume node affinity conflict")
+	ErrVolumeBindConflict = newPredicateFailureError("VolumeBindingNoMatch", "node(s) didn't find available persistent volumes to bind")
 	// ErrFakePredicate is used for test only. The fake predicates returning false also returns error
 	// as ErrFakePredicate.
-	ErrFakePredicate = newPredicateFailureError("FakePredicateError")
+	ErrFakePredicate = newPredicateFailureError("FakePredicateError", "Nodes failed the fake predicate")
 )

 // InsufficientResourceError is an error type that indicates what kind of resource limit is
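Each predicate error now pairs the machine-oriented predicate name with a human-readable reason. As a purely hypothetical fragment (the predicate name and message below are made up for illustration, not part of this commit), a new entry added to the same var block would follow the two-argument form:

	// Hypothetical example only; "CheckExampleCondition" and its message are
	// placeholders, not real predicates. As the comment above notes, a new entry
	// may also need handling in nodesWherePreemptionMightHelp() in
	// scheduler/core/generic_scheduler.go.
	ErrExampleConditionViolated = newPredicateFailureError("CheckExampleCondition", "node(s) didn't satisfy the example condition")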
@@ -91,10 +91,11 @@ func (e *InsufficientResourceError) GetInsufficientAmount() int64 {

 type PredicateFailureError struct {
 	PredicateName string
+	PredicateDesc string
 }

-func newPredicateFailureError(predicateName string) *PredicateFailureError {
-	return &PredicateFailureError{PredicateName: predicateName}
+func newPredicateFailureError(predicateName, predicateDesc string) *PredicateFailureError {
+	return &PredicateFailureError{PredicateName: predicateName, PredicateDesc: predicateDesc}
 }

 func (e *PredicateFailureError) Error() string {

@@ -102,7 +103,7 @@ func (e *PredicateFailureError) Error() string {
 }

 func (e *PredicateFailureError) GetReason() string {
-	return e.PredicateName
+	return e.PredicateDesc
 }

 type FailureReason struct {
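Taken together, these two hunks give PredicateFailureError a description field and make GetReason report it instead of the bare predicate name. A minimal, self-contained sketch of the resulting behaviour, built only from what this diff shows (it is not the scheduler package itself):

package main

import "fmt"

// Reduced copy of the updated type, for illustration only.
type PredicateFailureError struct {
	PredicateName string // machine-oriented predicate name, e.g. "NodeUnderDiskPressure"
	PredicateDesc string // human-readable reason, e.g. "node(s) had disk pressure"
}

func newPredicateFailureError(predicateName, predicateDesc string) *PredicateFailureError {
	return &PredicateFailureError{PredicateName: predicateName, PredicateDesc: predicateDesc}
}

// GetReason now surfaces the description rather than the predicate name.
func (e *PredicateFailureError) GetReason() string {
	return e.PredicateDesc
}

func main() {
	err := newPredicateFailureError("NodeUnderDiskPressure", "node(s) had disk pressure")
	fmt.Println(err.GetReason()) // node(s) had disk pressure
}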
@@ -518,7 +518,7 @@ func TestHumanReadableFitError(t *testing.T) {
 		},
 	}
 	if strings.Contains(err.Error(), "0/3 nodes are available") {
-		if strings.Contains(err.Error(), "2 NodeUnderDiskPressure") && strings.Contains(err.Error(), "1 NodeUnderMemoryPressure") {
+		if strings.Contains(err.Error(), "2 node(s) had disk pressure") && strings.Contains(err.Error(), "1 node(s) had memory pressure") {
 			return
 		}
 	}
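The test above only asserts that the aggregated fit error contains the human-readable counts. As a rough illustration of the aggregation it exercises (a sketch of the expected output shape, not the scheduler's actual FitError implementation):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// humanReadableFitError folds per-node failure reasons into one message of the
// form checked by TestHumanReadableFitError. Sketch only.
func humanReadableFitError(reasonsByNode map[string][]string) string {
	counts := map[string]int{}
	for _, reasons := range reasonsByNode {
		for _, r := range reasons {
			counts[r]++
		}
	}
	parts := make([]string, 0, len(counts))
	for r, n := range counts {
		parts = append(parts, fmt.Sprintf("%d %s", n, r))
	}
	sort.Strings(parts) // deterministic ordering for this example
	return fmt.Sprintf("0/%d nodes are available: %s.", len(reasonsByNode), strings.Join(parts, ", "))
}

func main() {
	fmt.Println(humanReadableFitError(map[string][]string{
		"node-1": {"node(s) had memory pressure"},
		"node-2": {"node(s) had disk pressure"},
		"node-3": {"node(s) had disk pressure"},
	}))
	// 0/3 nodes are available: 1 node(s) had memory pressure, 2 node(s) had disk pressure.
}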
@@ -679,7 +679,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 			FindBoundSatsified: false,
 		},
 		eventReason: "FailedScheduling",
-		expectError: makePredicateError("1 VolumeNodeAffinityConflict"),
+		expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
 	},
 	"unbound,no-matches": {
 		volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
@@ -687,7 +687,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 			FindBoundSatsified: true,
 		},
 		eventReason: "FailedScheduling",
-		expectError: makePredicateError("1 VolumeBindingNoMatch"),
+		expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
 	},
 	"bound-and-unbound-unsatisfied": {
 		volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
@@ -695,7 +695,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 			FindBoundSatsified: false,
 		},
 		eventReason: "FailedScheduling",
-		expectError: makePredicateError("1 VolumeBindingNoMatch, 1 VolumeNodeAffinityConflict"),
+		expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
 	},
 	"unbound,found-matches": {
 		volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
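The three cases above compare against makePredicateError, a helper in the scheduler unit tests whose body is not part of this diff. A plausible sketch, assuming it simply wraps the human-readable reason(s) into a single-node fit-error message:

package main

import "fmt"

// Sketch only: the real helper lives in the scheduler test file and is not
// shown in this diff; the exact wording below is an assumption.
func makePredicateError(failReason string) error {
	return fmt.Errorf("0/1 nodes are available: %v.", failReason)
}

func main() {
	fmt.Println(makePredicateError("1 node(s) had volume node affinity conflict"))
	// 0/1 nodes are available: 1 node(s) had volume node affinity conflict.
}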
@@ -95,8 +95,8 @@ func TestLocalPVNegativeAffinity(t *testing.T) {
 	if strings.Compare(p.Status.Conditions[0].Reason, "Unschedulable") != 0 {
 		t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason)
 	}
-	if !strings.Contains(p.Status.Conditions[0].Message, "MatchNodeSelector") || !strings.Contains(p.Status.Conditions[0].Message, "VolumeNodeAffinityConflict") {
-		t.Fatalf("Failed as Pod's %s failure message does not contain expected keywords: MatchNodeSelector, VolumeNodeAffinityConflict", podName)
+	if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match node selector") || !strings.Contains(p.Status.Conditions[0].Message, "node(s) had volume node affinity conflict") {
+		t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict", podName)
 	}
 	if err := config.client.CoreV1().Pods(config.ns).Delete(podName, &metav1.DeleteOptions{}); err != nil {
 		t.Fatalf("Failed to delete Pod %s: %v", podName, err)