mirror of https://github.com/k3s-io/k3s
Add scheduler option for bind timeout
parent 8fcbcafc11
commit e124159990

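For context, a minimal sketch (not part of this commit) of how the new bind-timeout option might be set when building the scheduler's component config programmatically. The import path and the kubeschedulerconfig alias are taken from the test hunks below; the 30-second value is only illustrative (the integration tests use the same number), and omitting the field lets the v1alpha1 defaulting added here fill in 600 seconds:

    package main

    import (
        kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
    )

    func main() {
        // Allow bind operations up to 30 seconds before timing out. A nil
        // BindTimeoutSeconds fails the new validation check, while the new
        // v1alpha1 defaulting fills in 600 seconds when the field is omitted.
        bindTimeout := int64(30)
        cfg := kubeschedulerconfig.KubeSchedulerConfiguration{
            BindTimeoutSeconds: &bindTimeout,
        }
        _ = cfg
    }
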
@@ -144,6 +144,7 @@ users:
     }

     defaultSource := "DefaultProvider"
+    defaultBindTimeoutSeconds := int64(600)

     testcases := []struct {
         name string

@@ -157,7 +158,10 @@ users:
             options: &Options{
                 ConfigFile: configFile,
                 ComponentConfig: func() kubeschedulerconfig.KubeSchedulerConfiguration {
-                    cfg, _ := newDefaultComponentConfig()
+                    cfg, err := newDefaultComponentConfig()
+                    if err != nil {
+                        t.Fatal(err)
+                    }
                     return *cfg
                 }(),
             },

@@ -187,6 +191,7 @@ users:
                     ContentType: "application/vnd.kubernetes.protobuf",
                 },
                 PercentageOfNodesToScore: 50,
+                BindTimeoutSeconds:       &defaultBindTimeoutSeconds,
             },
         },
         {

@@ -229,6 +234,7 @@ users:
                     ContentType: "application/vnd.kubernetes.protobuf",
                 },
                 PercentageOfNodesToScore: 50,
+                BindTimeoutSeconds:       &defaultBindTimeoutSeconds,
             },
         },
         {


@@ -305,6 +305,7 @@ func NewSchedulerConfig(s schedulerserverconfig.CompletedConfig) (*scheduler.Con
         EnableEquivalenceClassCache: utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
         DisablePreemption:           s.ComponentConfig.DisablePreemption,
         PercentageOfNodesToScore:    s.ComponentConfig.PercentageOfNodesToScore,
+        BindTimeoutSeconds:          *s.ComponentConfig.BindTimeoutSeconds,
     })

     source := s.ComponentConfig.AlgorithmSource


@@ -85,6 +85,11 @@ type KubeSchedulerConfiguration struct {
     // DEPRECATED.
     // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
     FailureDomains string
+
+    // Duration to wait for a binding operation to complete before timing out
+    // Value must be non-negative integer. The value zero indicates no waiting.
+    // If this value is nil, the default value will be used.
+    BindTimeoutSeconds *int64
 }

 // SchedulerAlgorithmSource is the source of a scheduler algorithm. One source


@@ -23,6 +23,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     apiserverconfigv1alpha1 "k8s.io/apiserver/pkg/apis/config/v1alpha1"
     kubescedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
+
     // this package shouldn't really depend on other k8s.io/kubernetes code
     api "k8s.io/kubernetes/pkg/apis/core"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"

@@ -102,4 +103,9 @@ func SetDefaults_KubeSchedulerConfiguration(obj *kubescedulerconfigv1alpha1.Kube

     // Use the default LeaderElectionConfiguration options
     apiserverconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection.LeaderElectionConfiguration)
+
+    if obj.BindTimeoutSeconds == nil {
+        defaultBindTimeoutSeconds := int64(600)
+        obj.BindTimeoutSeconds = &defaultBindTimeoutSeconds
+    }
 }


@@ -41,6 +41,9 @@ func ValidateKubeSchedulerConfiguration(cc *config.KubeSchedulerConfiguration) f
     if cc.HardPodAffinitySymmetricWeight < 0 || cc.HardPodAffinitySymmetricWeight > 100 {
         allErrs = append(allErrs, field.Invalid(field.NewPath("hardPodAffinitySymmetricWeight"), cc.HardPodAffinitySymmetricWeight, "not in valid range 0-100"))
     }
+    if cc.BindTimeoutSeconds == nil {
+        allErrs = append(allErrs, field.Required(field.NewPath("bindTimeoutSeconds"), ""))
+    }
     return allErrs
 }


@@ -17,15 +17,17 @@ limitations under the License.
 package validation

 import (
+    "testing"
+    "time"
+
     apimachinery "k8s.io/apimachinery/pkg/apis/config"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     apiserver "k8s.io/apiserver/pkg/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/apis/config"
-    "testing"
-    "time"
 )

 func TestValidateKubeSchedulerConfiguration(t *testing.T) {
+    testTimeout := int64(0)
     validConfig := &config.KubeSchedulerConfiguration{
         SchedulerName:      "me",
         HealthzBindAddress: "0.0.0.0:10254",

@@ -56,6 +58,7 @@ func TestValidateKubeSchedulerConfiguration(t *testing.T) {
                 RetryPeriod:   metav1.Duration{Duration: 5 * time.Second},
             },
         },
+        BindTimeoutSeconds: &testTimeout,
     }

     HardPodAffinitySymmetricWeightGt100 := validConfig.DeepCopy()

@@ -86,6 +89,9 @@ func TestValidateKubeSchedulerConfiguration(t *testing.T) {
     enableContentProfilingSetWithoutEnableProfiling.EnableProfiling = false
     enableContentProfilingSetWithoutEnableProfiling.EnableContentionProfiling = true

+    bindTimeoutUnset := validConfig.DeepCopy()
+    bindTimeoutUnset.BindTimeoutSeconds = nil
+
     scenarios := map[string]struct {
         expectedToFail bool
         config         *config.KubeSchedulerConfiguration

@@ -126,6 +132,10 @@ func TestValidateKubeSchedulerConfiguration(t *testing.T) {
             expectedToFail: true,
             config:         HardPodAffinitySymmetricWeightLt0,
         },
+        "bind-timeout-unset": {
+            expectedToFail: true,
+            config:         bindTimeoutUnset,
+        },
     }

     for name, scenario := range scenarios {


@@ -159,6 +159,7 @@ type ConfigFactoryArgs struct {
     EnableEquivalenceClassCache bool
     DisablePreemption           bool
     PercentageOfNodesToScore    int32
+    BindTimeoutSeconds          int64
 }

 // NewConfigFactory initializes the default implementation of a Configurator To encourage eventual privatization of the struct type, we only

@@ -305,7 +306,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator {

     if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
         // Setup volume binder
-        c.volumeBinder = volumebinder.NewVolumeBinder(args.Client, args.PvcInformer, args.PvInformer, args.StorageClassInformer)
+        c.volumeBinder = volumebinder.NewVolumeBinder(args.Client, args.PvcInformer, args.PvInformer, args.StorageClassInformer, time.Duration(args.BindTimeoutSeconds)*time.Second)

         args.StorageClassInformer.Informer().AddEventHandler(
             cache.ResourceEventHandlerFuncs{


@@ -49,6 +49,7 @@ import (
 const (
     enableEquivalenceCache = true
     disablePodPreemption   = false
+    bindTimeoutSeconds     = 600
 )

 func TestCreate(t *testing.T) {

@@ -557,6 +558,7 @@ func newConfigFactory(client *clientset.Clientset, hardPodAffinitySymmetricWeigh
         enableEquivalenceCache,
         disablePodPreemption,
         schedulerapi.DefaultPercentageOfNodesToScore,
+        bindTimeoutSeconds,
     })
 }


@@ -36,11 +36,11 @@ func NewVolumeBinder(
     client clientset.Interface,
     pvcInformer coreinformers.PersistentVolumeClaimInformer,
     pvInformer coreinformers.PersistentVolumeInformer,
-    storageClassInformer storageinformers.StorageClassInformer) *VolumeBinder {
+    storageClassInformer storageinformers.StorageClassInformer,
+    bindTimeout time.Duration) *VolumeBinder {

     return &VolumeBinder{
-        // TODO: what is a good bind timeout value?
-        Binder: persistentvolume.NewVolumeBinder(client, pvcInformer, pvInformer, storageClassInformer, 10*time.Minute),
+        Binder: persistentvolume.NewVolumeBinder(client, pvcInformer, pvInformer, storageClassInformer, bindTimeout),
     }
 }


@@ -81,6 +81,11 @@ type KubeSchedulerConfiguration struct {
     // DEPRECATED.
     // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
     FailureDomains string `json:"failureDomains"`
+
+    // Duration to wait for a binding operation to complete before timing out
+    // Value must be non-negative integer. The value zero indicates no waiting.
+    // If this value is nil, the default value will be used.
+    BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds"`
 }

 // SchedulerAlgorithmSource is the source of a scheduler algorithm. One source


@@ -183,6 +183,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

+    defaultBindTimeout := int64(30)
     ss := &schedulerappconfig.Config{
         ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
             HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,

@@ -195,6 +196,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
                 },
             },
         },
+        BindTimeoutSeconds: &defaultBindTimeout,
     },
     Client:          clientSet,
     InformerFactory: informerFactory,

@@ -244,6 +246,7 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

+    defaultBindTimeout := int64(30)
     ss := &schedulerappconfig.Config{
         ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
             SchedulerName: v1.DefaultSchedulerName,

@@ -256,6 +259,7 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
             },
         },
         HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
+        BindTimeoutSeconds:             &defaultBindTimeout,
     },
     Client:          clientSet,
     InformerFactory: informerFactory,


@@ -91,6 +91,7 @@ func createConfiguratorWithPodInformer(
         EnableEquivalenceClassCache: utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
         DisablePreemption:           false,
         PercentageOfNodesToScore:    schedulerapi.DefaultPercentageOfNodesToScore,
+        BindTimeoutSeconds:          600,
     })
 }