mirror of https://github.com/k3s-io/k3s
Merge pull request #34000 from wojtek-t/set_cache_size
Automatic merge from submit-queue Set deserialization cache size based on target memory usage **Special notes for your reviewer**: This is the PR we talked about yesterday. **Release note**: ```release-note To reduce memory usage to reasonable levels in smaller clusters, kube-apiserver now sets the deserialization cache size based on the target memory usage. ```
commit
bd3664cbef
|
@ -145,6 +145,28 @@ func Run(s *options.APIServer) error {
|
|||
glog.Fatalf("Failed to start kubelet client: %v", err)
|
||||
}
|
||||
|
||||
if s.StorageConfig.DeserializationCacheSize == 0 {
|
||||
// When size of cache is not explicitly set, estimate its size based on
|
||||
// target memory usage.
|
||||
glog.V(2).Infof("Initalizing deserialization cache size based on %dMB limit", s.TargetRAMMB)
|
||||
|
||||
// This is a heuristic that, from memory capacity, tries to infer
|
||||
// the maximum number of nodes in the cluster and set cache sizes based
|
||||
// on that value.
|
||||
// From our documentation, we officially recommend 120GB machines for
|
||||
// 2000 nodes, and we scale from that point. Thus we assume ~60MB of
|
||||
// capacity per node.
|
||||
// TODO: We may consider deciding that some percentage of memory will
|
||||
// be used for the deserialization cache and divide it by the max object
|
||||
// size to compute its size. We may even go further and measure
|
||||
// collective sizes of the objects in the cache.
|
||||
clusterSize := s.TargetRAMMB / 60
|
||||
s.StorageConfig.DeserializationCacheSize = 25 * clusterSize
|
||||
if s.StorageConfig.DeserializationCacheSize < 1000 {
|
||||
s.StorageConfig.DeserializationCacheSize = 1000
|
||||
}
|
||||
}
|
||||
|
||||
storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion()
|
||||
if err != nil {
|
||||
glog.Fatalf("error generating storage version map: %s", err)
|
||||
|
|
|
@ -82,6 +82,10 @@ func Run(s *options.ServerRunOptions) error {
|
|||
// TODO: register cluster federation resources here.
|
||||
resourceConfig := genericapiserver.NewResourceConfig()
|
||||
|
||||
if s.StorageConfig.DeserializationCacheSize == 0 {
|
||||
// When size of cache is not explicitly set, set it to 50000
|
||||
s.StorageConfig.DeserializationCacheSize = 50000
|
||||
}
|
||||
storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion()
|
||||
if err != nil {
|
||||
glog.Fatalf("error generating storage version map: %s", err)
|
||||
|
|
|
@ -37,8 +37,6 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
DefaultDeserializationCacheSize = 50000
|
||||
|
||||
// TODO: This can be tightened up. It still matches objects named watch or proxy.
|
||||
defaultLongRunningRequestRE = "(/|^)((watch|proxy)(/|$)|(logs?|portforward|exec|attach)/?$)"
|
||||
)
|
||||
|
@ -158,7 +156,9 @@ func NewServerRunOptions() *ServerRunOptions {
|
|||
func (o *ServerRunOptions) WithEtcdOptions() *ServerRunOptions {
|
||||
o.StorageConfig = storagebackend.Config{
|
||||
Prefix: DefaultEtcdPathPrefix,
|
||||
DeserializationCacheSize: DefaultDeserializationCacheSize,
|
||||
// Default cache size to 0 - if unset, its size will be set based on target
|
||||
// memory usage.
|
||||
DeserializationCacheSize: 0,
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue