kubelet/cadvisor: Refactor cadvisor disk stat/usage interfaces

Shaya Potter 2016-05-17 22:05:55 -07:00
parent f3ab7b182f
commit ae08ef38d7
22 changed files with 76 additions and 63 deletions

View File

@@ -330,7 +330,7 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
}
if kcfg.CAdvisorInterface == nil {
-kcfg.CAdvisorInterface, err = cadvisor.New(s.CAdvisorPort)
+kcfg.CAdvisorInterface, err = cadvisor.New(s.CAdvisorPort, kcfg.ContainerRuntime)
if err != nil {
return err
}

View File

@@ -28,8 +28,8 @@ type MesosCadvisor struct {
mem uint64
}
-func NewMesosCadvisor(cores int, mem uint64, port uint) (*MesosCadvisor, error) {
-c, err := cadvisor.New(port)
+func NewMesosCadvisor(cores int, mem uint64, port uint, runtime string) (*MesosCadvisor, error) {
+c, err := cadvisor.New(port, runtime)
if err != nil {
return nil, err
}

View File

@@ -209,7 +209,7 @@ func (s *KubeletExecutorServer) runKubelet(
// create custom cAdvisor interface which return the resource values that Mesos reports
ni := <-nodeInfos
-cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, s.CAdvisorPort)
+cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, s.CAdvisorPort, kcfg.ContainerRuntime)
if err != nil {
return err
}

View File

@@ -39,6 +39,7 @@ import (
)
type cadvisorClient struct {
+runtime string
manager.Manager
}
@@ -60,7 +61,7 @@ func init() {
}
// Creates a cAdvisor and exports its API on the specified port if port > 0.
-func New(port uint) (Interface, error) {
+func New(port uint, runtime string) (Interface, error) {
sysFs, err := sysfs.NewRealSysFs()
if err != nil {
return nil, err
@@ -73,6 +74,7 @@ func New(port uint) (Interface, error) {
}
cadvisorClient := &cadvisorClient{
+runtime: runtime,
Manager: m,
}
@@ -163,8 +165,19 @@ func (cc *cadvisorClient) MachineInfo() (*cadvisorapi.MachineInfo, error) {
return cc.GetMachineInfo()
}
-func (cc *cadvisorClient) DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
-return cc.getFsInfo(cadvisorfs.LabelDockerImages)
+func (cc *cadvisorClient) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+var label string
+switch cc.runtime {
+case "docker":
+label = cadvisorfs.LabelDockerImages
+case "rkt":
+label = cadvisorfs.LabelRktImages
+default:
+return cadvisorapiv2.FsInfo{}, fmt.Errorf("ImagesFsInfo: unknown runtime: %v", cc.runtime)
+}
+return cc.getFsInfo(label)
}
func (cc *cadvisorClient) RootFsInfo() (cadvisorapiv2.FsInfo, error) {

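For readers following the hunk above, here is a minimal, self-contained sketch of how the new constructor and the renamed ImagesFsInfo method are meant to be used together. Only cadvisor.New(port, runtime) and ImagesFsInfo come from this commit; the import path, the port value, and the surrounding program are assumptions for illustration.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
)

func main() {
	// The runtime string decides which cAdvisor filesystem label ImagesFsInfo
	// queries: "docker" -> LabelDockerImages, "rkt" -> LabelRktImages,
	// anything else is an error (see the switch in the hunk above).
	c, err := cadvisor.New(4194, "rkt") // 4194 is the conventional cAdvisor port; 0 disables the HTTP server
	if err != nil {
		panic(err)
	}

	// ImagesFsInfo replaces the old, Docker-specific DockerImagesFsInfo.
	imageFs, err := c.ImagesFsInfo()
	if err != nil {
		panic(err)
	}
	fmt.Printf("image filesystem: %d/%d bytes used, %d available\n",
		imageFs.Usage, imageFs.Capacity, imageFs.Available)
}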
View File

@@ -31,7 +31,7 @@ type cadvisorUnsupported struct {
var _ Interface = new(cadvisorUnsupported)
-func New(port uint) (Interface, error) {
+func New(port uint, runtime string) (Interface, error) {
return &cadvisorUnsupported{}, nil
}
@@ -65,7 +65,7 @@ func (cu *cadvisorUnsupported) VersionInfo() (*cadvisorapi.VersionInfo, error) {
return nil, unsupportedErr
}
-func (cu *cadvisorUnsupported) DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+func (cu *cadvisorUnsupported) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, unsupportedErr
}

View File

@@ -62,7 +62,7 @@ func (c *Fake) VersionInfo() (*cadvisorapi.VersionInfo, error) {
return new(cadvisorapi.VersionInfo), nil
}
-func (c *Fake) DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+func (c *Fake) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, nil
}

View File

@@ -69,7 +69,7 @@ func (c *Mock) VersionInfo() (*cadvisorapi.VersionInfo, error) {
return args.Get(0).(*cadvisorapi.VersionInfo), args.Error(1)
}
-func (c *Mock) DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+func (c *Mock) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
args := c.Called()
return args.Get(0).(cadvisorapiv2.FsInfo), args.Error(1)
}

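Because the method is renamed on the Interface, every testify expectation keyed on the old name has to move from "DockerImagesFsInfo" to "ImagesFsInfo", as the test hunks further down show. A small standalone sketch of that pattern, using a stand-in for the kubelet's cadvisortest.Mock (the stand-in type itself is hypothetical):

package main

import (
	"fmt"

	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
	"github.com/stretchr/testify/mock"
)

// mockCadvisor mimics the shape of the Mock in the hunk above; only the
// method relevant to this commit is reproduced here.
type mockCadvisor struct {
	mock.Mock
}

func (c *mockCadvisor) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
	args := c.Called()
	return args.Get(0).(cadvisorapiv2.FsInfo), args.Error(1)
}

func main() {
	c := &mockCadvisor{}
	// Expectations previously registered as c.On("DockerImagesFsInfo") must
	// now use the new method name, or the mock will fail to match the call.
	c.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{Usage: 400, Capacity: 1000}, nil)

	fs, err := c.ImagesFsInfo()
	fmt.Println(fs.Usage, fs.Capacity, err)
}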
View File

@@ -34,7 +34,7 @@ type Interface interface {
VersionInfo() (*cadvisorapi.VersionInfo, error)
// Returns usage information about the filesystem holding Docker images.
-DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error)
+ImagesFsInfo() (cadvisorapiv2.FsInfo, error)
// Returns usage information about the root filesystem.
RootFsInfo() (cadvisorapiv2.FsInfo, error)

View File

@@ -35,7 +35,7 @@ const mb = 1024 * 1024
type diskSpaceManager interface {
// Checks the available disk space
IsRootDiskSpaceAvailable() (bool, error)
-IsDockerDiskSpaceAvailable() (bool, error)
+IsRuntimeDiskSpaceAvailable() (bool, error)
}
type DiskSpacePolicy struct {
@@ -83,8 +83,8 @@ func (dm *realDiskSpaceManager) getFsInfo(fsType string, f func() (cadvisorapi.F
return fsi, nil
}
-func (dm *realDiskSpaceManager) IsDockerDiskSpaceAvailable() (bool, error) {
-return dm.isSpaceAvailable("docker", dm.policy.DockerFreeDiskMB, dm.cadvisor.DockerImagesFsInfo)
+func (dm *realDiskSpaceManager) IsRuntimeDiskSpaceAvailable() (bool, error) {
+return dm.isSpaceAvailable("runtime", dm.policy.DockerFreeDiskMB, dm.cadvisor.ImagesFsInfo)
}
func (dm *realDiskSpaceManager) IsRootDiskSpaceAvailable() (bool, error) {

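The rename is cheap here because isSpaceAvailable takes the stat fetcher as a plain function value, so only the method value being passed in changes (plus the label used in messages). A simplified, self-contained sketch of that shape follows; the real isSpaceAvailable also caches results and logs, which is omitted, and the threshold value is made up.

package main

import (
	"fmt"

	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
)

const mb = 1024 * 1024

// isSpaceAvailable is a trimmed-down illustration of the disk manager helper:
// it calls the supplied fetcher and compares available bytes against a
// threshold in MB. On error it reports the error but assumes space is
// available, matching the behaviour TestFsInfoError below relies on.
func isSpaceAvailable(fsType string, freeDiskMB int, fetch func() (cadvisorapiv2.FsInfo, error)) (bool, error) {
	fs, err := fetch()
	if err != nil {
		return true, fmt.Errorf("failed to get fs info for %q: %v", fsType, err)
	}
	return fs.Available >= uint64(freeDiskMB)*mb, nil
}

func main() {
	imagesFsInfo := func() (cadvisorapiv2.FsInfo, error) {
		// Stand-in for cadvisor.Interface.ImagesFsInfo.
		return cadvisorapiv2.FsInfo{Capacity: 1000 * mb, Available: 600 * mb, Usage: 400 * mb}, nil
	}
	ok, err := isSpaceAvailable("runtime", 256, imagesFsInfo)
	fmt.Println(ok, err) // true <nil>
}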
View File

@@ -61,7 +61,7 @@ func TestSpaceAvailable(t *testing.T) {
dm, err := newDiskSpaceManager(mockCadvisor, policy)
assert.NoError(err)
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{
Usage: 400 * mb,
Capacity: 1000 * mb,
Available: 600 * mb,
@@ -71,7 +71,7 @@ func TestSpaceAvailable(t *testing.T) {
Capacity: 10 * mb,
}, nil)
-ok, err := dm.IsDockerDiskSpaceAvailable()
+ok, err := dm.IsRuntimeDiskSpaceAvailable()
assert.NoError(err)
assert.True(ok)
@@ -80,31 +80,31 @@ func TestSpaceAvailable(t *testing.T) {
assert.False(ok)
}
-// TestIsDockerDiskSpaceAvailableWithSpace verifies IsDockerDiskSpaceAvailable results when
+// TestIsRuntimeDiskSpaceAvailableWithSpace verifies IsRuntimeDiskSpaceAvailable results when
// space is available.
-func TestIsDockerDiskSpaceAvailableWithSpace(t *testing.T) {
+func TestIsRuntimeDiskSpaceAvailableWithSpace(t *testing.T) {
assert, policy, mockCadvisor := setUp(t)
dm, err := newDiskSpaceManager(mockCadvisor, policy)
require.NoError(t, err)
// 500MB available
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{
Usage: 9500 * mb,
Capacity: 10000 * mb,
Available: 500 * mb,
}, nil)
-ok, err := dm.IsDockerDiskSpaceAvailable()
+ok, err := dm.IsRuntimeDiskSpaceAvailable()
assert.NoError(err)
assert.True(ok)
}
-// TestIsDockerDiskSpaceAvailableWithoutSpace verifies IsDockerDiskSpaceAvailable results when
+// TestIsRuntimeDiskSpaceAvailableWithoutSpace verifies IsRuntimeDiskSpaceAvailable results when
// space is not available.
-func TestIsDockerDiskSpaceAvailableWithoutSpace(t *testing.T) {
+func TestIsRuntimeDiskSpaceAvailableWithoutSpace(t *testing.T) {
// 1MB available
assert, policy, mockCadvisor := setUp(t)
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{
Usage: 999 * mb,
Capacity: 1000 * mb,
Available: 1 * mb,
@@ -113,7 +113,7 @@ func TestIsDockerDiskSpaceAvailableWithoutSpace(t *testing.T) {
dm, err := newDiskSpaceManager(mockCadvisor, policy)
require.NoError(t, err)
-ok, err := dm.IsDockerDiskSpaceAvailable()
+ok, err := dm.IsRuntimeDiskSpaceAvailable()
assert.NoError(err)
assert.False(ok)
}
@@ -164,7 +164,7 @@ func TestCache(t *testing.T) {
dm, err := newDiskSpaceManager(mockCadvisor, policy)
assert.NoError(err)
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{
Usage: 400 * mb,
Capacity: 1000 * mb,
Available: 300 * mb,
@@ -176,7 +176,7 @@ func TestCache(t *testing.T) {
}, nil).Once()
// Initial calls which should be recorded in mockCadvisor
-ok, err := dm.IsDockerDiskSpaceAvailable()
+ok, err := dm.IsRuntimeDiskSpaceAvailable()
assert.NoError(err)
assert.True(ok)
@@ -188,7 +188,7 @@ func TestCache(t *testing.T) {
cadvisorCallCount := len(mockCadvisor.Calls)
// Checking for space again shouldn't need to mock as cache would serve it.
-ok, err = dm.IsDockerDiskSpaceAvailable()
+ok, err = dm.IsRuntimeDiskSpaceAvailable()
assert.NoError(err)
assert.True(ok)
@@ -208,9 +208,9 @@ func TestFsInfoError(t *testing.T) {
dm, err := newDiskSpaceManager(mockCadvisor, policy)
assert.NoError(err)
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{}, fmt.Errorf("can't find fs"))
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{}, fmt.Errorf("can't find fs"))
mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{}, fmt.Errorf("EBUSY"))
-ok, err := dm.IsDockerDiskSpaceAvailable()
+ok, err := dm.IsRuntimeDiskSpaceAvailable()
assert.Error(err)
assert.True(ok)
ok, err = dm.IsRootDiskSpaceAvailable()

View File

@@ -209,7 +209,7 @@ func (im *realImageManager) detectImages(detectTime time.Time) error {
func (im *realImageManager) GarbageCollect() error {
// Get disk usage on disk holding images.
-fsInfo, err := im.cadvisor.DockerImagesFsInfo()
+fsInfo, err := im.cadvisor.ImagesFsInfo()
if err != nil {
return err
}

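GarbageCollect only needs the Usage and Capacity from the returned FsInfo to decide whether image eviction should kick in. A hedged sketch of that decision; the HighThresholdPercent policy field and the exact rounding belong to the image manager and are only approximated here.

package main

import (
	"fmt"

	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
)

// overHighThreshold approximates the check GarbageCollect performs on the
// FsInfo it now obtains from ImagesFsInfo (previously DockerImagesFsInfo).
func overHighThreshold(fs cadvisorapiv2.FsInfo, highThresholdPercent int) bool {
	if fs.Capacity == 0 {
		return false // avoid dividing by zero on an empty/unreported filesystem
	}
	usagePercent := int(fs.Usage * 100 / fs.Capacity)
	return usagePercent >= highThresholdPercent
}

func main() {
	// Mirrors the 95%-usage and 40%-usage fixtures used in the tests below.
	fmt.Println(overHighThreshold(cadvisorapiv2.FsInfo{Usage: 950, Capacity: 1000}, 90)) // true
	fmt.Println(overHighThreshold(cadvisorapiv2.FsInfo{Usage: 400, Capacity: 1000}, 90)) // false
}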
View File

@@ -373,7 +373,7 @@ func TestGarbageCollectBelowLowThreshold(t *testing.T) {
manager, _, mockCadvisor := newRealImageManager(policy)
// Expect 40% usage.
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 1000,
}, nil)
@@ -388,7 +388,7 @@ func TestGarbageCollectCadvisorFailure(t *testing.T) {
}
manager, _, mockCadvisor := newRealImageManager(policy)
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, fmt.Errorf("error"))
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, fmt.Errorf("error"))
assert.NotNil(t, manager.GarbageCollect())
}
@@ -400,7 +400,7 @@ func TestGarbageCollectBelowSuccess(t *testing.T) {
manager, fakeRuntime, mockCadvisor := newRealImageManager(policy)
// Expect 95% usage and most of it gets freed.
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 950,
Capacity: 1000,
}, nil)
@@ -419,7 +419,7 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) {
manager, fakeRuntime, mockCadvisor := newRealImageManager(policy)
// Expect 95% usage and little of it gets freed.
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 950,
Capacity: 1000,
}, nil)

View File

@@ -2379,7 +2379,7 @@ func (kl *Kubelet) isOutOfDisk() bool {
outOfDockerDisk := false
outOfRootDisk := false
// Check disk space once globally and reject or accept all new pods.
-withinBounds, err := kl.diskSpaceManager.IsDockerDiskSpaceAvailable()
+withinBounds, err := kl.diskSpaceManager.IsRuntimeDiskSpaceAvailable()
// Assume enough space in case of errors.
if err == nil && !withinBounds {
outOfDockerDisk = true

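A trimmed-down reading of the isOutOfDisk check after the rename, made self-contained so it can be run; the interface mirrors diskSpaceManager from the hunk above, while the fake manager and the glue code are hypothetical.

package main

import "fmt"

// diskSpaceManager mirrors the interface shown earlier in this diff.
type diskSpaceManager interface {
	IsRootDiskSpaceAvailable() (bool, error)
	IsRuntimeDiskSpaceAvailable() (bool, error)
}

// isOutOfDisk: the node is out of disk if either the runtime (image)
// filesystem or the root filesystem is below its threshold. As in the
// original, an error from either check is treated as "enough space".
func isOutOfDisk(dm diskSpaceManager) bool {
	outOfRuntimeDisk := false
	outOfRootDisk := false
	if ok, err := dm.IsRuntimeDiskSpaceAvailable(); err == nil && !ok {
		outOfRuntimeDisk = true
	}
	if ok, err := dm.IsRootDiskSpaceAvailable(); err == nil && !ok {
		outOfRootDisk = true
	}
	return outOfRuntimeDisk || outOfRootDisk
}

// fakeManager reports a full root filesystem and a healthy runtime filesystem.
type fakeManager struct{}

func (fakeManager) IsRootDiskSpaceAvailable() (bool, error)    { return false, nil }
func (fakeManager) IsRuntimeDiskSpaceAvailable() (bool, error) { return true, nil }

func main() {
	fmt.Println(isOutOfDisk(fakeManager{})) // true: root fs is below its threshold
}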
View File

@@ -50,10 +50,10 @@ func (kl *Kubelet) GetContainerInfoV2(name string, options cadvisorapiv2.Request
return kl.cadvisor.ContainerInfoV2(name, options)
}
-// DockerImagesFsInfo returns information about docker image fs usage from
+// ImagesFsInfo returns information about docker image fs usage from
// cadvisor.
-func (kl *Kubelet) DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
-return kl.cadvisor.DockerImagesFsInfo()
+func (kl *Kubelet) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+return kl.cadvisor.ImagesFsInfo()
}
// RootFsInfo returns info about the root fs from cadvisor.

View File

@@ -299,7 +299,7 @@ func TestSyncLoopAbort(t *testing.T) {
func TestSyncPodsStartPod(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
@@ -321,7 +321,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })
@@ -1967,7 +1967,7 @@ func TestHandlePortConflicts(t *testing.T) {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl.nodeLister = testNodeLister{nodes: []api.Node{
@@ -2028,7 +2028,7 @@ func TestHandleHostNameConflicts(t *testing.T) {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl.nodeLister = testNodeLister{nodes: []api.Node{
@@ -2098,7 +2098,7 @@ func TestHandleNodeSelector(t *testing.T) {
kl.nodeLister = testNodeLister{nodes: nodes}
kl.nodeInfo = testNodeInfo{nodes: nodes}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
pods := []*api.Pod{
podWithUidNameNsSpec("123456789", "podA", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
@@ -2143,7 +2143,7 @@ func TestHandleMemExceeded(t *testing.T) {
kl.nodeLister = testNodeLister{nodes: nodes}
kl.nodeInfo = testNodeInfo{nodes: nodes}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
spec := api.PodSpec{NodeName: kl.nodeName,
@@ -2187,7 +2187,7 @@ func TestHandleMemExceeded(t *testing.T) {
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
@@ -2336,7 +2336,7 @@ func TestValidateContainerLogStatus(t *testing.T) {
func updateDiskSpacePolicy(kubelet *Kubelet, mockCadvisor *cadvisortest.Mock, rootCap, dockerCap, rootAvail, dockerAvail uint64, rootThreshold, dockerThreshold int) error {
dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: rootCap * mb, Available: rootAvail * mb}
rootFsInfo := cadvisorapiv2.FsInfo{Capacity: dockerCap * mb, Available: dockerAvail * mb}
-mockCadvisor.On("DockerImagesFsInfo").Return(dockerimagesFsInfo, nil)
+mockCadvisor.On("ImagesFsInfo").Return(dockerimagesFsInfo, nil)
mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)
dsp := DiskSpacePolicy{DockerFreeDiskMB: rootThreshold, RootFreeDiskMB: dockerThreshold}
@@ -3099,7 +3099,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
@@ -3142,7 +3142,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
@@ -3425,7 +3425,7 @@ func TestRegisterExistingNodeWithApiserver(t *testing.T) {
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
-mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400 * mb,
Capacity: 1000 * mb,
Available: 600 * mb,
@@ -3650,7 +3650,7 @@ func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec api.PodSpe
func TestDeletePodDirsForDeletedPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
@@ -3697,7 +3697,7 @@ func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod
func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
@@ -3717,7 +3717,7 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
runningPod := &kubecontainer.Pod{
ID: "12345678",
@@ -4192,7 +4192,7 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
},
}}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
pods := []*api.Pod{

View File

@@ -46,7 +46,7 @@ import (
func TestRunOnce(t *testing.T) {
cadvisor := &cadvisortest.Mock{}
cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-cadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+cadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400 * mb,
Capacity: 1000 * mb,
Available: 600 * mb,

View File

@@ -171,7 +171,7 @@ type HostInterface interface {
GetNode() (*api.Node, error)
GetNodeConfig() cm.NodeConfig
LatestLoopEntryTime() time.Time
-DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error)
+ImagesFsInfo() (cadvisorapiv2.FsInfo, error)
RootFsInfo() (cadvisorapiv2.FsInfo, error)
ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool)
PLEGHealthCheck() (bool, error)

View File

@@ -142,8 +142,8 @@ func (_ *fakeKubelet) GetContainerInfoV2(_ string, _ cadvisorapiv2.RequestOption
return nil, nil
}
-func (_ *fakeKubelet) DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
-return cadvisorapiv2.FsInfo{}, fmt.Errorf("Unsupported Operation DockerImagesFsInfo")
+func (_ *fakeKubelet) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+return cadvisorapiv2.FsInfo{}, fmt.Errorf("Unsupported Operation ImagesFsInfo")
}
func (_ *fakeKubelet) RootFsInfo() (cadvisorapiv2.FsInfo, error) {

View File

@@ -44,7 +44,7 @@ type StatsProvider interface {
GetPodByName(namespace, name string) (*api.Pod, bool)
GetNode() (*api.Node, error)
GetNodeConfig() cm.NodeConfig
-DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error)
+ImagesFsInfo() (cadvisorapiv2.FsInfo, error)
RootFsInfo() (cadvisorapiv2.FsInfo, error)
ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool)
GetPods() []*api.Pod

View File

@@ -162,8 +162,8 @@ func (_m *MockStatsProvider) GetNodeConfig() cm.NodeConfig {
return r0
}
-// DockerImagesFsInfo provides a mock function with given fields:
-func (_m *MockStatsProvider) DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+// ImagesFsInfo provides a mock function with given fields:
+func (_m *MockStatsProvider) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
ret := _m.Called()
var r0 cadvisorapiv2.FsInfo

View File

@@ -81,7 +81,7 @@ func (sp *summaryProviderImpl) Get() (*stats.Summary, error) {
if err != nil {
return nil, err
}
-imageFsInfo, err := sp.provider.DockerImagesFsInfo()
+imageFsInfo, err := sp.provider.ImagesFsInfo()
if err != nil {
return nil, err
}

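The summary provider now reads the image filesystem through the runtime-neutral ImagesFsInfo; the provider interfaces above also expose RootFsInfo. The sketch below only shows how a consumer might report the two FsInfo values using the fields visible in this diff (Usage, Capacity, Available); the real code copies them into the kubelet stats API types, which are not reproduced here.

package main

import (
	"fmt"

	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
)

// describeFs is a hypothetical helper for formatting a filesystem report.
func describeFs(name string, fs cadvisorapiv2.FsInfo) string {
	return fmt.Sprintf("%s: used %d of %d bytes (%d available)", name, fs.Usage, fs.Capacity, fs.Available)
}

func main() {
	imageFs := cadvisorapiv2.FsInfo{Usage: 400 << 20, Capacity: 1000 << 20, Available: 600 << 20}
	rootFs := cadvisorapiv2.FsInfo{Usage: 10 << 20, Capacity: 100 << 20, Available: 90 << 20}
	fmt.Println(describeFs("imagefs", imageFs))
	fmt.Println(describeFs("rootfs", rootFs))
}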
View File

@@ -99,7 +99,7 @@ func containerRuntime() error {
}
// Setup cadvisor to check the container environment
-c, err := cadvisor.New(0 /*don't start the http server*/)
+c, err := cadvisor.New(0 /*don't start the http server*/, "docker")
if err != nil {
return printError("Container Runtime Check: %s Could not start cadvisor %v", failed, err)
}