mirror of https://github.com/k3s-io/k3s
Merge pull request #53094 from yguo0905/fix
Automatic merge from submit-queue (batch tested with PRs 51021, 53225, 53094, 53219). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Change ImageGCManager to consume ImageFS stats from StatsProvider

Fixes #53083.

**Release note**:
```
Change ImageGCManager to consume ImageFS stats from StatsProvider
```

/assign @Random-Liu
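At a high level, the change narrows image GC's dependency from a full cadvisor interface to the small StatsProvider interface introduced in the diff below, so anything that can report image-filesystem stats can back the garbage collector. A minimal sketch of what satisfying that interface looks like — the `fakeStatsProvider` type and the standalone `main` are illustrative only, not part of the change:

```go
package main

import (
	"fmt"

	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
)

// StatsProvider restates the interface added by this PR: image GC only needs
// a way to fetch image-filesystem stats.
type StatsProvider interface {
	ImageFsStats() (*statsapi.FsStats, error)
}

// fakeStatsProvider is a hypothetical implementation returning fixed numbers,
// in the same spirit as the stubbed provider used by the updated unit tests.
type fakeStatsProvider struct {
	capacity, available uint64
}

func (f *fakeStatsProvider) ImageFsStats() (*statsapi.FsStats, error) {
	return &statsapi.FsStats{
		CapacityBytes:  &f.capacity,
		AvailableBytes: &f.available,
	}, nil
}

func main() {
	var p StatsProvider = &fakeStatsProvider{capacity: 1000, available: 600}
	s, err := p.ImageFsStats()
	if err != nil {
		panic(err)
	}
	fmt.Printf("capacity=%d available=%d\n", *s.CapacityBytes, *s.AvailableBytes)
}
```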
commit a0b7d467e2
pkg/kubelet/images/BUILD
@@ -17,7 +17,7 @@ go_library(
         "types.go",
     ],
     deps = [
-        "//pkg/kubelet/cadvisor:go_default_library",
+        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
         "//pkg/kubelet/container:go_default_library",
         "//pkg/kubelet/events:go_default_library",
         "//pkg/util/parsers:go_default_library",
@@ -40,10 +40,10 @@ go_test(
     ],
     library = ":go_default_library",
     deps = [
-        "//pkg/kubelet/cadvisor/testing:go_default_library",
+        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
         "//pkg/kubelet/container:go_default_library",
         "//pkg/kubelet/container/testing:go_default_library",
-        "//vendor/github.com/google/cadvisor/info/v2:go_default_library",
+        "//pkg/kubelet/server/stats/testing:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/github.com/stretchr/testify/require:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
pkg/kubelet/images/image_gc_manager.go
@@ -17,6 +17,7 @@ limitations under the License.
 package images
 
 import (
+    goerrors "errors"
     "fmt"
     "math"
     "sort"
@@ -24,17 +25,25 @@ import (
     "time"
 
     "github.com/golang/glog"
 
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/tools/record"
-    "k8s.io/kubernetes/pkg/kubelet/cadvisor"
+    statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     "k8s.io/kubernetes/pkg/kubelet/container"
     kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
     "k8s.io/kubernetes/pkg/kubelet/events"
 )
 
+// StatsProvider is an interface for fetching stats used during image garbage
+// collection.
+type StatsProvider interface {
+    // ImageFsStats returns the stats of the image filesystem.
+    ImageFsStats() (*statsapi.FsStats, error)
+}
+
 // Manages lifecycle of all images.
 //
 // Implementation is thread-safe.
@@ -78,8 +87,8 @@ type realImageGCManager struct {
     // The image garbage collection policy in use.
     policy ImageGCPolicy
 
-    // cAdvisor instance.
-    cadvisor cadvisor.Interface
+    // statsProvider provides stats used during image garbage collection.
+    statsProvider StatsProvider
 
     // Recorder for Kubernetes events.
     recorder record.EventRecorder
@@ -128,7 +137,7 @@ type imageRecord struct {
     size int64
 }
 
-func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
+func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
     // Validate policy.
     if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {
         return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent)
@@ -140,13 +149,13 @@ func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Int
         return nil, fmt.Errorf("LowThresholdPercent %d can not be higher than HighThresholdPercent %d", policy.LowThresholdPercent, policy.HighThresholdPercent)
     }
     im := &realImageGCManager{
-        runtime:      runtime,
-        policy:       policy,
-        imageRecords: make(map[string]*imageRecord),
-        cadvisor:     cadvisorInterface,
-        recorder:     recorder,
-        nodeRef:      nodeRef,
-        initialized:  false,
+        runtime:       runtime,
+        policy:        policy,
+        imageRecords:  make(map[string]*imageRecord),
+        statsProvider: statsProvider,
+        recorder:      recorder,
+        nodeRef:       nodeRef,
+        initialized:   false,
     }
 
     return im, nil
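The constructor keeps the same policy validation as before; restated here as a standalone sketch for illustration (only the two checks visible in the hunks above — the real constructor may perform additional validation):

```go
package main

import "fmt"

// validateThresholds mirrors the checks shown in NewImageGCManager above.
func validateThresholds(high, low int) error {
	if high < 0 || high > 100 {
		return fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", high)
	}
	if low > high {
		return fmt.Errorf("LowThresholdPercent %d can not be higher than HighThresholdPercent %d", low, high)
	}
	return nil
}

func main() {
	fmt.Println(validateThresholds(90, 80)) // <nil>
	fmt.Println(validateThresholds(80, 90)) // low threshold higher than high: error
}
```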
@@ -244,12 +253,19 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) error {
 
 func (im *realImageGCManager) GarbageCollect() error {
     // Get disk usage on disk holding images.
-    fsInfo, err := im.cadvisor.ImagesFsInfo()
+    fsStats, err := im.statsProvider.ImageFsStats()
     if err != nil {
         return err
     }
-    capacity := int64(fsInfo.Capacity)
-    available := int64(fsInfo.Available)
+
+    var capacity, available int64
+    if fsStats.CapacityBytes != nil {
+        capacity = int64(*fsStats.CapacityBytes)
+    }
+    if fsStats.AvailableBytes != nil {
+        available = int64(*fsStats.AvailableBytes)
+    }
+
     if available > capacity {
         glog.Warningf("available %d is larger than capacity %d", available, capacity)
         available = capacity
@@ -257,7 +273,7 @@ func (im *realImageGCManager) GarbageCollect() error {
 
     // Check valid capacity.
     if capacity == 0 {
-        err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
+        err := goerrors.New("invalid capacity 0 on image filesystem")
         im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error())
         return err
     }
@@ -266,7 +282,7 @@ func (im *realImageGCManager) GarbageCollect() error {
     usagePercent := 100 - int(available*100/capacity)
     if usagePercent >= im.policy.HighThresholdPercent {
         amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available
-        glog.Infof("[imageGCManager]: Disk usage on %q (%s) is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", fsInfo.Device, fsInfo.Mountpoint, usagePercent, im.policy.HighThresholdPercent, amountToFree)
+        glog.Infof("[imageGCManager]: Disk usage on image filesystem is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", usagePercent, im.policy.HighThresholdPercent, amountToFree)
         freed, err := im.freeSpace(amountToFree, time.Now())
         if err != nil {
             return err
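To make the freeing math above concrete, here is the same arithmetic as a runnable sketch, using the numbers the updated tests below use (capacity 1000, available 50, thresholds 90/80):

```go
package main

import "fmt"

func main() {
	// Numbers taken from the updated tests: a 1000-byte image filesystem with
	// 50 bytes available, HighThresholdPercent 90, LowThresholdPercent 80.
	var capacity, available int64 = 1000, 50
	high, low := 90, 80

	usagePercent := 100 - int(available*100/capacity) // 100 - 5 = 95
	if usagePercent >= high {
		// Free enough to bring usage back down to the low threshold.
		amountToFree := capacity*int64(100-low)/100 - available // 1000*20/100 - 50 = 150
		fmt.Printf("usage %d%% >= %d%%, need to free %d bytes\n", usagePercent, high, amountToFree)
	}
}
```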
pkg/kubelet/images/image_gc_manager_test.go
@@ -21,28 +21,29 @@ import (
     "testing"
     "time"
 
-    cadvisorapiv2 "github.com/google/cadvisor/info/v2"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 
     "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/client-go/tools/record"
-    cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
+    statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     "k8s.io/kubernetes/pkg/kubelet/container"
     containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
+    statstest "k8s.io/kubernetes/pkg/kubelet/server/stats/testing"
 )
 
 var zero time.Time
 
-func newRealImageGCManager(policy ImageGCPolicy) (*realImageGCManager, *containertest.FakeRuntime, *cadvisortest.Mock) {
+func newRealImageGCManager(policy ImageGCPolicy) (*realImageGCManager, *containertest.FakeRuntime, *statstest.StatsProvider) {
     fakeRuntime := &containertest.FakeRuntime{}
-    mockCadvisor := new(cadvisortest.Mock)
+    mockStatsProvider := new(statstest.StatsProvider)
     return &realImageGCManager{
-        runtime:      fakeRuntime,
-        policy:       policy,
-        imageRecords: make(map[string]*imageRecord),
-        cadvisor:     mockCadvisor,
-        recorder:     &record.FakeRecorder{},
-    }, fakeRuntime, mockCadvisor
+        runtime:       fakeRuntime,
+        policy:        policy,
+        imageRecords:  make(map[string]*imageRecord),
+        statsProvider: mockStatsProvider,
+        recorder:      &record.FakeRecorder{},
+    }, fakeRuntime, mockStatsProvider
 }
 
 // Accessors used for thread-safe testing.
@@ -356,12 +357,12 @@ func TestGarbageCollectBelowLowThreshold(t *testing.T) {
         HighThresholdPercent: 90,
         LowThresholdPercent:  80,
     }
-    manager, _, mockCadvisor := newRealImageGCManager(policy)
+    manager, _, mockStatsProvider := newRealImageGCManager(policy)
 
     // Expect 40% usage.
-    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
-        Available: 600,
-        Capacity:  1000,
+    mockStatsProvider.On("ImageFsStats").Return(&statsapi.FsStats{
+        AvailableBytes: uint64Ptr(600),
+        CapacityBytes:  uint64Ptr(1000),
     }, nil)
 
     assert.NoError(t, manager.GarbageCollect())
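For reference, those stub values put usage at 100 - 600*100/1000 = 40%, below the 90% high threshold, so the test expects GarbageCollect to return nil without attempting to free any images.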
@@ -372,9 +373,9 @@ func TestGarbageCollectCadvisorFailure(t *testing.T) {
         HighThresholdPercent: 90,
         LowThresholdPercent:  80,
     }
-    manager, _, mockCadvisor := newRealImageGCManager(policy)
+    manager, _, mockStatsProvider := newRealImageGCManager(policy)
 
-    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, fmt.Errorf("error"))
+    mockStatsProvider.On("ImageFsStats").Return(&statsapi.FsStats{}, fmt.Errorf("error"))
     assert.NotNil(t, manager.GarbageCollect())
 }
@@ -383,12 +384,12 @@ func TestGarbageCollectBelowSuccess(t *testing.T) {
         HighThresholdPercent: 90,
         LowThresholdPercent:  80,
     }
-    manager, fakeRuntime, mockCadvisor := newRealImageGCManager(policy)
+    manager, fakeRuntime, mockStatsProvider := newRealImageGCManager(policy)
 
     // Expect 95% usage and most of it gets freed.
-    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
-        Available: 50,
-        Capacity:  1000,
+    mockStatsProvider.On("ImageFsStats").Return(&statsapi.FsStats{
+        AvailableBytes: uint64Ptr(50),
+        CapacityBytes:  uint64Ptr(1000),
     }, nil)
     fakeRuntime.ImageList = []container.Image{
         makeImage(0, 450),
@@ -402,12 +403,12 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) {
         HighThresholdPercent: 90,
         LowThresholdPercent:  80,
     }
-    manager, fakeRuntime, mockCadvisor := newRealImageGCManager(policy)
+    manager, fakeRuntime, mockStatsProvider := newRealImageGCManager(policy)
 
     // Expect 95% usage and little of it gets freed.
-    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
-        Available: 50,
-        Capacity:  1000,
+    mockStatsProvider.On("ImageFsStats").Return(&statsapi.FsStats{
+        AvailableBytes: uint64Ptr(50),
+        CapacityBytes:  uint64Ptr(1000),
     }, nil)
     fakeRuntime.ImageList = []container.Image{
         makeImage(0, 50),
@@ -423,13 +424,13 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
         MinAge: time.Minute * 1,
     }
     fakeRuntime := &containertest.FakeRuntime{}
-    mockCadvisor := new(cadvisortest.Mock)
+    mockStatsProvider := new(statstest.StatsProvider)
     manager := &realImageGCManager{
-        runtime:      fakeRuntime,
-        policy:       policy,
-        imageRecords: make(map[string]*imageRecord),
-        cadvisor:     mockCadvisor,
-        recorder:     &record.FakeRecorder{},
+        runtime:       fakeRuntime,
+        policy:        policy,
+        imageRecords:  make(map[string]*imageRecord),
+        statsProvider: mockStatsProvider,
+        recorder:      &record.FakeRecorder{},
     }
 
     fakeRuntime.ImageList = []container.Image{
@@ -523,3 +524,7 @@ func TestValidateImageGCPolicy(t *testing.T) {
         }
     }
 }
+
+func uint64Ptr(i uint64) *uint64 {
+    return &i
+}
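The new uint64Ptr helper exists because statsapi.FsStats expresses CapacityBytes and AvailableBytes as *uint64 (which is also why GarbageCollect now nil-checks them), and Go cannot take the address of a literal. A self-contained sketch of the same pattern:

```go
package main

import "fmt"

// uint64Ptr returns a pointer to a copy of its argument, matching the test
// helper added above; it is needed because `&uint64(1000)` is not valid Go.
func uint64Ptr(i uint64) *uint64 {
	return &i
}

func main() {
	capacity := uint64Ptr(1000)
	available := uint64Ptr(600)
	fmt.Println(*capacity, *available) // 1000 600
}
```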
pkg/kubelet/kubelet.go
@@ -720,7 +720,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
     klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, integer.IntMax(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod))
 
     // setup imageManager
-    imageManager, err := images.NewImageGCManager(klet.containerRuntime, kubeDeps.CAdvisorInterface, kubeDeps.Recorder, nodeRef, imageGCPolicy)
+    imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy)
     if err != nil {
         return nil, fmt.Errorf("failed to initialize image manager: %v", err)
     }
pkg/kubelet/kubelet_test.go
@@ -241,11 +241,21 @@ func newTestKubeletWithImageList(
         UID:       types.UID(testKubeletHostname),
         Namespace: "",
     }
 
+    volumeStatsAggPeriod := time.Second * 10
+    kubelet.resourceAnalyzer = serverstats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod)
+
+    kubelet.StatsProvider = stats.NewCadvisorStatsProvider(
+        kubelet.cadvisor,
+        kubelet.resourceAnalyzer,
+        kubelet.podManager,
+        kubelet.runtimeCache,
+        fakeRuntime)
     fakeImageGCPolicy := images.ImageGCPolicy{
         HighThresholdPercent: 90,
         LowThresholdPercent:  80,
     }
-    imageGCManager, err := images.NewImageGCManager(fakeRuntime, mockCadvisor, fakeRecorder, fakeNodeRef, fakeImageGCPolicy)
+    imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy)
     assert.NoError(t, err)
     kubelet.imageManager = &fakeImageGCManager{
         fakeImageService: fakeRuntime,
@@ -262,9 +272,6 @@ func newTestKubeletWithImageList(
     kubelet.clock = fakeClock
     kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
 
-    // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
-    volumeStatsAggPeriod := time.Second * 10
-    kubelet.resourceAnalyzer = serverstats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod)
     nodeRef := &v1.ObjectReference{
         Kind: "Node",
         Name: string(kubelet.nodeName),
@@ -307,12 +314,6 @@ func newTestKubeletWithImageList(
     kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
     kubelet.AddPodSyncHandler(activeDeadlineHandler)
     kubelet.gpuManager = gpu.NewGPUManagerStub()
-    kubelet.StatsProvider = stats.NewCadvisorStatsProvider(
-        kubelet.cadvisor,
-        kubelet.resourceAnalyzer,
-        kubelet.podManager,
-        kubelet.runtimeCache,
-        fakeRuntime)
     return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
 }