mirror of https://github.com/k3s-io/k3s
Change StatsProvider interface to provide container stats from either cadvisor or CRI and implement this interface using cadvisor
parent
acdf625e46
commit
f9767d2f71
|
@ -279,6 +279,7 @@ pkg/kubelet/server/portforward
|
|||
pkg/kubelet/server/remotecommand
|
||||
pkg/kubelet/server/stats
|
||||
pkg/kubelet/server/streaming
|
||||
pkg/kubelet/stats
|
||||
pkg/kubelet/status
|
||||
pkg/kubelet/status/testing
|
||||
pkg/kubelet/sysctl
|
||||
|
|
|
@ -12,7 +12,6 @@ go_library(
|
|||
"active_deadline.go",
|
||||
"doc.go",
|
||||
"kubelet.go",
|
||||
"kubelet_cadvisor.go",
|
||||
"kubelet_getters.go",
|
||||
"kubelet_network.go",
|
||||
"kubelet_node_status.go",
|
||||
|
@ -79,6 +78,7 @@ go_library(
|
|||
"//pkg/kubelet/server/remotecommand:go_default_library",
|
||||
"//pkg/kubelet/server/stats:go_default_library",
|
||||
"//pkg/kubelet/server/streaming:go_default_library",
|
||||
"//pkg/kubelet/stats:go_default_library",
|
||||
"//pkg/kubelet/status:go_default_library",
|
||||
"//pkg/kubelet/sysctl:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
|
@ -145,7 +145,6 @@ go_test(
|
|||
name = "go_default_test",
|
||||
srcs = [
|
||||
"active_deadline_test.go",
|
||||
"kubelet_cadvisor_test.go",
|
||||
"kubelet_getters_test.go",
|
||||
"kubelet_network_test.go",
|
||||
"kubelet_node_status_test.go",
|
||||
|
@ -192,6 +191,7 @@ go_test(
|
|||
"//pkg/kubelet/server/portforward:go_default_library",
|
||||
"//pkg/kubelet/server/remotecommand:go_default_library",
|
||||
"//pkg/kubelet/server/stats:go_default_library",
|
||||
"//pkg/kubelet/stats:go_default_library",
|
||||
"//pkg/kubelet/status:go_default_library",
|
||||
"//pkg/kubelet/status/testing:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
|
@ -274,6 +274,7 @@ filegroup(
|
|||
"//pkg/kubelet/rktshim:all-srcs",
|
||||
"//pkg/kubelet/secret:all-srcs",
|
||||
"//pkg/kubelet/server:all-srcs",
|
||||
"//pkg/kubelet/stats:all-srcs",
|
||||
"//pkg/kubelet/status:all-srcs",
|
||||
"//pkg/kubelet/sysctl:all-srcs",
|
||||
"//pkg/kubelet/types:all-srcs",
|
||||
|
|
|
@ -1,10 +1,4 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
|
@ -27,6 +21,7 @@ go_library(
|
|||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
|
||||
|
@ -96,4 +91,5 @@ filegroup(
|
|||
"//pkg/kubelet/container/testing:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
|
|
@ -1,9 +1,4 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
|
@ -11,10 +6,12 @@ go_library(
|
|||
"fake_cache.go",
|
||||
"fake_runtime.go",
|
||||
"fake_runtime_helper.go",
|
||||
"mock_runtime_cache.go",
|
||||
"mockfileinfo.go",
|
||||
"os.go",
|
||||
"runtime_mock.go",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
|
@ -38,4 +35,5 @@ filegroup(
|
|||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by mockery v1.0.0
|
||||
package testing
|
||||
|
||||
import container "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
import mock "github.com/stretchr/testify/mock"
|
||||
import time "time"
|
||||
|
||||
// MockRuntimeCache is an autogenerated mock type for the RuntimeCache type
|
||||
type MockRuntimeCache struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// ForceUpdateIfOlder provides a mock function with given fields: _a0
|
||||
func (_m *MockRuntimeCache) ForceUpdateIfOlder(_a0 time.Time) error {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(time.Time) error); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// GetPods provides a mock function with given fields:
|
||||
func (_m *MockRuntimeCache) GetPods() ([]*container.Pod, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 []*container.Pod
|
||||
if rf, ok := ret.Get(0).(func() []*container.Pod); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*container.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
|
@ -88,8 +88,9 @@ import (
|
|||
"k8s.io/kubernetes/pkg/kubelet/rkt"
|
||||
"k8s.io/kubernetes/pkg/kubelet/secret"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
|
||||
"k8s.io/kubernetes/pkg/kubelet/stats"
|
||||
"k8s.io/kubernetes/pkg/kubelet/status"
|
||||
"k8s.io/kubernetes/pkg/kubelet/sysctl"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
|
@ -559,6 +560,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
MTU: int(crOptions.NetworkPluginMTU),
|
||||
}
|
||||
|
||||
klet.resourceAnalyzer = serverstats.NewResourceAnalyzer(klet, kubeCfg.VolumeStatsAggPeriod.Duration)
|
||||
|
||||
// Remote runtime shim just cannot talk back to kubelet, so it doesn't
|
||||
// support bandwidth shaping or hostports till #35457. To enable legacy
|
||||
// features, replace with networkHost.
|
||||
|
@ -641,6 +644,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
}
|
||||
klet.containerRuntime = runtime
|
||||
klet.runner = runtime
|
||||
klet.StatsProvider = stats.NewCadvisorStatsProvider(
|
||||
klet.cadvisor,
|
||||
klet.resourceAnalyzer,
|
||||
klet.podManager,
|
||||
klet.runtimeCache,
|
||||
klet.containerRuntime)
|
||||
} else {
|
||||
// rkt uses the legacy, non-CRI, integration. Configure it the old way.
|
||||
// TODO: Include hairpin mode settings in rkt?
|
||||
|
@ -673,11 +682,14 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
}
|
||||
klet.containerRuntime = runtime
|
||||
klet.runner = kubecontainer.DirectStreamingRunner(runtime)
|
||||
klet.StatsProvider = stats.NewCadvisorStatsProvider(
|
||||
klet.cadvisor,
|
||||
klet.resourceAnalyzer,
|
||||
klet.podManager,
|
||||
klet.runtimeCache,
|
||||
klet.containerRuntime)
|
||||
}
|
||||
|
||||
// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
|
||||
klet.resourceAnalyzer = stats.NewResourceAnalyzer(klet, kubeCfg.VolumeStatsAggPeriod.Duration, klet.containerRuntime)
|
||||
|
||||
klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{})
|
||||
klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
|
||||
klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy)
|
||||
|
@ -992,7 +1004,7 @@ type Kubelet struct {
|
|||
oomWatcher OOMWatcher
|
||||
|
||||
// Monitor resource usage
|
||||
resourceAnalyzer stats.ResourceAnalyzer
|
||||
resourceAnalyzer serverstats.ResourceAnalyzer
|
||||
|
||||
// Whether or not we should have the QOS cgroup hierarchy for resource management
|
||||
cgroupsPerQOS bool
|
||||
|
@ -1103,6 +1115,9 @@ type Kubelet struct {
|
|||
// dockerLegacyService contains some legacy methods for backward compatibility.
|
||||
// It should be set only when docker is using non json-file logging driver.
|
||||
dockerLegacyService dockershim.DockerLegacyService
|
||||
|
||||
// StatsProvider provides the node and the container stats.
|
||||
*stats.StatsProvider
|
||||
}
|
||||
|
||||
func allLocalIPsWithoutLoopback() ([]net.IP, error) {
|
||||
|
|
|
@ -1,108 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubelet
|
||||
|
||||
import (
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
// GetContainerInfo returns stats (from Cadvisor) for a container.
|
||||
func (kl *Kubelet) GetContainerInfo(podFullName string, podUID types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
|
||||
|
||||
// Resolve and type convert back again.
|
||||
// We need the static pod UID but the kubecontainer API works with types.UID.
|
||||
podUID = types.UID(kl.podManager.TranslatePodUID(podUID))
|
||||
|
||||
pods, err := kl.runtimeCache.GetPods()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
|
||||
container := pod.FindContainerByName(containerName)
|
||||
if container == nil {
|
||||
return nil, kubecontainer.ErrContainerNotFound
|
||||
}
|
||||
|
||||
ci, err := kl.cadvisor.DockerContainer(container.ID.ID, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ci, nil
|
||||
}
|
||||
|
||||
// GetContainerInfoV2 returns stats (from Cadvisor) for containers.
|
||||
func (kl *Kubelet) GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
|
||||
return kl.cadvisor.ContainerInfoV2(name, options)
|
||||
}
|
||||
|
||||
// ImagesFsInfo returns information about docker image fs usage from
|
||||
// cadvisor.
|
||||
func (kl *Kubelet) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
|
||||
return kl.cadvisor.ImagesFsInfo()
|
||||
}
|
||||
|
||||
// RootFsInfo returns info about the root fs from cadvisor.
|
||||
func (kl *Kubelet) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
|
||||
return kl.cadvisor.RootFsInfo()
|
||||
}
|
||||
|
||||
// GetRawContainerInfo returns stats (from Cadvisor) for a non-Kubernetes container.
|
||||
func (kl *Kubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) {
|
||||
if subcontainers {
|
||||
return kl.cadvisor.SubcontainerInfo(containerName, req)
|
||||
}
|
||||
|
||||
containerInfo, err := kl.cadvisor.ContainerInfo(containerName, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]*cadvisorapi.ContainerInfo{
|
||||
containerInfo.Name: containerInfo,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetVersionInfo returns information about the version of cAdvisor in use.
|
||||
func (kl *Kubelet) GetVersionInfo() (*cadvisorapi.VersionInfo, error) {
|
||||
return kl.cadvisor.VersionInfo()
|
||||
}
|
||||
|
||||
// GetCachedMachineInfo assumes that the machine info can't change without a reboot
|
||||
func (kl *Kubelet) GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error) {
|
||||
if kl.machineInfo == nil {
|
||||
info, err := kl.cadvisor.MachineInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kl.machineInfo = info
|
||||
}
|
||||
return kl.machineInfo, nil
|
||||
}
|
||||
|
||||
// GetCachedRootFsInfo assumes that the rootfs info can't change without a reboot
|
||||
func (kl *Kubelet) GetCachedRootFsInfo() (cadvisorapiv2.FsInfo, error) {
|
||||
if kl.rootfsInfo == nil {
|
||||
info, err := kl.cadvisor.RootFsInfo()
|
||||
if err != nil {
|
||||
return cadvisorapiv2.FsInfo{}, err
|
||||
}
|
||||
kl.rootfsInfo = &info
|
||||
}
|
||||
return *kl.rootfsInfo, nil
|
||||
}
|
|
@ -1,252 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubelet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
)
|
||||
|
||||
func TestGetContainerInfo(t *testing.T) {
|
||||
cadvisorAPIFailure := fmt.Errorf("cAdvisor failure")
|
||||
runtimeError := fmt.Errorf("List containers error")
|
||||
tests := []struct {
|
||||
name string
|
||||
containerID string
|
||||
containerPath string
|
||||
cadvisorContainerInfo cadvisorapi.ContainerInfo
|
||||
runtimeError error
|
||||
podList []*kubecontainertest.FakePod
|
||||
requestedPodFullName string
|
||||
requestedPodUID types.UID
|
||||
requestedContainerName string
|
||||
expectDockerContainerCall bool
|
||||
mockError error
|
||||
expectedError error
|
||||
expectStats bool
|
||||
}{
|
||||
{
|
||||
name: "get container info",
|
||||
containerID: "ab2cdf",
|
||||
containerPath: "/docker/ab2cdf",
|
||||
cadvisorContainerInfo: cadvisorapi.ContainerInfo{
|
||||
ContainerReference: cadvisorapi.ContainerReference{
|
||||
Name: "/docker/ab2cdf",
|
||||
},
|
||||
},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainertest.FakePod{
|
||||
{
|
||||
Pod: &kubecontainer.Pod{
|
||||
ID: "12345678",
|
||||
Name: "qux",
|
||||
Namespace: "ns",
|
||||
Containers: []*kubecontainer.Container{
|
||||
{
|
||||
Name: "foo",
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: "ab2cdf"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
requestedPodFullName: "qux_ns",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
expectDockerContainerCall: true,
|
||||
mockError: nil,
|
||||
expectedError: nil,
|
||||
expectStats: true,
|
||||
},
|
||||
{
|
||||
name: "get container info when cadvisor failed",
|
||||
containerID: "ab2cdf",
|
||||
containerPath: "/docker/ab2cdf",
|
||||
cadvisorContainerInfo: cadvisorapi.ContainerInfo{},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainertest.FakePod{
|
||||
{
|
||||
Pod: &kubecontainer.Pod{
|
||||
ID: "uuid",
|
||||
Name: "qux",
|
||||
Namespace: "ns",
|
||||
Containers: []*kubecontainer.Container{
|
||||
{
|
||||
Name: "foo",
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: "ab2cdf"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
requestedPodFullName: "qux_ns",
|
||||
requestedPodUID: "uuid",
|
||||
requestedContainerName: "foo",
|
||||
expectDockerContainerCall: true,
|
||||
mockError: cadvisorAPIFailure,
|
||||
expectedError: cadvisorAPIFailure,
|
||||
expectStats: false,
|
||||
},
|
||||
{
|
||||
name: "get container info on non-existent container",
|
||||
containerID: "",
|
||||
containerPath: "",
|
||||
cadvisorContainerInfo: cadvisorapi.ContainerInfo{},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainertest.FakePod{},
|
||||
requestedPodFullName: "qux",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
expectDockerContainerCall: false,
|
||||
mockError: nil,
|
||||
expectedError: kubecontainer.ErrContainerNotFound,
|
||||
expectStats: false,
|
||||
},
|
||||
{
|
||||
name: "get container info when container runtime failed",
|
||||
containerID: "",
|
||||
containerPath: "",
|
||||
cadvisorContainerInfo: cadvisorapi.ContainerInfo{},
|
||||
runtimeError: runtimeError,
|
||||
podList: []*kubecontainertest.FakePod{},
|
||||
requestedPodFullName: "qux",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
mockError: nil,
|
||||
expectedError: runtimeError,
|
||||
expectStats: false,
|
||||
},
|
||||
{
|
||||
name: "get container info with no containers",
|
||||
containerID: "",
|
||||
containerPath: "",
|
||||
cadvisorContainerInfo: cadvisorapi.ContainerInfo{},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainertest.FakePod{},
|
||||
requestedPodFullName: "qux_ns",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
mockError: nil,
|
||||
expectedError: kubecontainer.ErrContainerNotFound,
|
||||
expectStats: false,
|
||||
},
|
||||
{
|
||||
name: "get container info with no matching containers",
|
||||
containerID: "",
|
||||
containerPath: "",
|
||||
cadvisorContainerInfo: cadvisorapi.ContainerInfo{},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainertest.FakePod{
|
||||
{
|
||||
Pod: &kubecontainer.Pod{
|
||||
ID: "12345678",
|
||||
Name: "qux",
|
||||
Namespace: "ns",
|
||||
Containers: []*kubecontainer.Container{
|
||||
{
|
||||
Name: "bar",
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: "fakeID"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
requestedPodFullName: "qux_ns",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
mockError: nil,
|
||||
expectedError: kubecontainer.ErrContainerNotFound,
|
||||
expectStats: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnablec */)
|
||||
defer testKubelet.Cleanup()
|
||||
fakeRuntime := testKubelet.fakeRuntime
|
||||
kubelet := testKubelet.kubelet
|
||||
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
if tc.expectDockerContainerCall {
|
||||
mockCadvisor.On("DockerContainer", tc.containerID, cadvisorReq).Return(tc.cadvisorContainerInfo, tc.mockError)
|
||||
}
|
||||
fakeRuntime.Err = tc.runtimeError
|
||||
fakeRuntime.PodList = tc.podList
|
||||
|
||||
stats, err := kubelet.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUID, tc.requestedContainerName, cadvisorReq)
|
||||
assert.Equal(t, tc.expectedError, err)
|
||||
|
||||
if tc.expectStats {
|
||||
require.NotNil(t, stats)
|
||||
}
|
||||
mockCadvisor.AssertExpectations(t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetRawContainerInfoRoot(t *testing.T) {
|
||||
containerPath := "/"
|
||||
containerInfo := &cadvisorapi.ContainerInfo{
|
||||
ContainerReference: cadvisorapi.ContainerReference{
|
||||
Name: containerPath,
|
||||
},
|
||||
}
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
|
||||
mockCadvisor.On("ContainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
|
||||
|
||||
_, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, false)
|
||||
assert.NoError(t, err)
|
||||
mockCadvisor.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestGetRawContainerInfoSubcontainers(t *testing.T) {
|
||||
containerPath := "/kubelet"
|
||||
containerInfo := map[string]*cadvisorapi.ContainerInfo{
|
||||
containerPath: {
|
||||
ContainerReference: cadvisorapi.ContainerReference{
|
||||
Name: containerPath,
|
||||
},
|
||||
},
|
||||
"/kubelet/sub": {
|
||||
ContainerReference: cadvisorapi.ContainerReference{
|
||||
Name: "/kubelet/sub",
|
||||
},
|
||||
},
|
||||
}
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
mockCadvisor := testKubelet.fakeCadvisor
|
||||
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
|
||||
mockCadvisor.On("SubcontainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
|
||||
|
||||
result, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, true)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, result, 2)
|
||||
mockCadvisor.AssertExpectations(t)
|
||||
}
|
|
@ -23,6 +23,8 @@ import (
|
|||
"path/filepath"
|
||||
|
||||
"github.com/golang/glog"
|
||||
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/cmd/kubelet/app/options"
|
||||
|
@ -239,3 +241,20 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err
|
|||
}
|
||||
return volumes, nil
|
||||
}
|
||||
|
||||
// GetVersionInfo returns information about the version of cAdvisor in use.
|
||||
func (kl *Kubelet) GetVersionInfo() (*cadvisorapiv1.VersionInfo, error) {
|
||||
return kl.cadvisor.VersionInfo()
|
||||
}
|
||||
|
||||
// GetCachedMachineInfo assumes that the machine info can't change without a reboot
|
||||
func (kl *Kubelet) GetCachedMachineInfo() (*cadvisorapiv1.MachineInfo, error) {
|
||||
if kl.machineInfo == nil {
|
||||
info, err := kl.cadvisor.MachineInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kl.machineInfo = info
|
||||
}
|
||||
return kl.machineInfo, nil
|
||||
}
|
||||
|
|
|
@ -60,7 +60,8 @@ import (
|
|||
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
|
||||
probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
|
||||
"k8s.io/kubernetes/pkg/kubelet/secret"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
"k8s.io/kubernetes/pkg/kubelet/stats"
|
||||
"k8s.io/kubernetes/pkg/kubelet/status"
|
||||
statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
|
@ -256,7 +257,7 @@ func newTestKubeletWithImageList(
|
|||
|
||||
// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
|
||||
volumeStatsAggPeriod := time.Second * 10
|
||||
kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime)
|
||||
kubelet.resourceAnalyzer = serverstats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod)
|
||||
nodeRef := &v1.ObjectReference{
|
||||
Kind: "Node",
|
||||
Name: string(kubelet.nodeName),
|
||||
|
@ -298,6 +299,12 @@ func newTestKubeletWithImageList(
|
|||
kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
|
||||
kubelet.AddPodSyncHandler(activeDeadlineHandler)
|
||||
kubelet.gpuManager = gpu.NewGPUManagerStub()
|
||||
kubelet.StatsProvider = stats.NewCadvisorStatsProvider(
|
||||
kubelet.cadvisor,
|
||||
kubelet.resourceAnalyzer,
|
||||
kubelet.podManager,
|
||||
kubelet.runtimeCache,
|
||||
fakeRuntime)
|
||||
return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
|
||||
}
|
||||
|
||||
|
|
|
@ -7,10 +7,16 @@ load(
|
|||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["fake_mirror_client.go"],
|
||||
srcs = [
|
||||
"fake_mirror_client.go",
|
||||
"mock_manager.go",
|
||||
],
|
||||
deps = [
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/mock:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
|
@ -0,0 +1,291 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by mockery v1.0.0
|
||||
package testing
|
||||
|
||||
import kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
import mock "github.com/stretchr/testify/mock"
|
||||
|
||||
import types "k8s.io/apimachinery/pkg/types"
|
||||
import v1 "k8s.io/api/core/v1"
|
||||
|
||||
// MockManager is an autogenerated mock type for the Manager type
|
||||
type MockManager struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// AddPod provides a mock function with given fields: _a0
|
||||
func (_m *MockManager) AddPod(_a0 *v1.Pod) {
|
||||
_m.Called(_a0)
|
||||
}
|
||||
|
||||
// CreateMirrorPod provides a mock function with given fields: _a0
|
||||
func (_m *MockManager) CreateMirrorPod(_a0 *v1.Pod) error {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(*v1.Pod) error); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// DeleteMirrorPod provides a mock function with given fields: podFullName
|
||||
func (_m *MockManager) DeleteMirrorPod(podFullName string) error {
|
||||
ret := _m.Called(podFullName)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string) error); ok {
|
||||
r0 = rf(podFullName)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// DeleteOrphanedMirrorPods provides a mock function with given fields:
|
||||
func (_m *MockManager) DeleteOrphanedMirrorPods() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
// DeletePod provides a mock function with given fields: _a0
|
||||
func (_m *MockManager) DeletePod(_a0 *v1.Pod) {
|
||||
_m.Called(_a0)
|
||||
}
|
||||
|
||||
// GetMirrorPodByPod provides a mock function with given fields: _a0
|
||||
func (_m *MockManager) GetMirrorPodByPod(_a0 *v1.Pod) (*v1.Pod, bool) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *v1.Pod
|
||||
if rf, ok := ret.Get(0).(func(*v1.Pod) *v1.Pod); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 bool
|
||||
if rf, ok := ret.Get(1).(func(*v1.Pod) bool); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Get(1).(bool)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetPodByFullName provides a mock function with given fields: podFullName
|
||||
func (_m *MockManager) GetPodByFullName(podFullName string) (*v1.Pod, bool) {
|
||||
ret := _m.Called(podFullName)
|
||||
|
||||
var r0 *v1.Pod
|
||||
if rf, ok := ret.Get(0).(func(string) *v1.Pod); ok {
|
||||
r0 = rf(podFullName)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 bool
|
||||
if rf, ok := ret.Get(1).(func(string) bool); ok {
|
||||
r1 = rf(podFullName)
|
||||
} else {
|
||||
r1 = ret.Get(1).(bool)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetPodByMirrorPod provides a mock function with given fields: _a0
|
||||
func (_m *MockManager) GetPodByMirrorPod(_a0 *v1.Pod) (*v1.Pod, bool) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *v1.Pod
|
||||
if rf, ok := ret.Get(0).(func(*v1.Pod) *v1.Pod); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 bool
|
||||
if rf, ok := ret.Get(1).(func(*v1.Pod) bool); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Get(1).(bool)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetPodByName provides a mock function with given fields: namespace, name
|
||||
func (_m *MockManager) GetPodByName(namespace string, name string) (*v1.Pod, bool) {
|
||||
ret := _m.Called(namespace, name)
|
||||
|
||||
var r0 *v1.Pod
|
||||
if rf, ok := ret.Get(0).(func(string, string) *v1.Pod); ok {
|
||||
r0 = rf(namespace, name)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 bool
|
||||
if rf, ok := ret.Get(1).(func(string, string) bool); ok {
|
||||
r1 = rf(namespace, name)
|
||||
} else {
|
||||
r1 = ret.Get(1).(bool)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetPodByUID provides a mock function with given fields: _a0
|
||||
func (_m *MockManager) GetPodByUID(_a0 types.UID) (*v1.Pod, bool) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *v1.Pod
|
||||
if rf, ok := ret.Get(0).(func(types.UID) *v1.Pod); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 bool
|
||||
if rf, ok := ret.Get(1).(func(types.UID) bool); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Get(1).(bool)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetPods provides a mock function with given fields:
|
||||
func (_m *MockManager) GetPods() []*v1.Pod {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 []*v1.Pod
|
||||
if rf, ok := ret.Get(0).(func() []*v1.Pod); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// GetPodsAndMirrorPods provides a mock function with given fields:
|
||||
func (_m *MockManager) GetPodsAndMirrorPods() ([]*v1.Pod, []*v1.Pod) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 []*v1.Pod
|
||||
if rf, ok := ret.Get(0).(func() []*v1.Pod); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 []*v1.Pod
|
||||
if rf, ok := ret.Get(1).(func() []*v1.Pod); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).([]*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetUIDTranslations provides a mock function with given fields:
|
||||
func (_m *MockManager) GetUIDTranslations() (map[kubelettypes.ResolvedPodUID]kubelettypes.MirrorPodUID, map[kubelettypes.MirrorPodUID]kubelettypes.ResolvedPodUID) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 map[kubelettypes.ResolvedPodUID]kubelettypes.MirrorPodUID
|
||||
if rf, ok := ret.Get(0).(func() map[kubelettypes.ResolvedPodUID]kubelettypes.MirrorPodUID); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(map[kubelettypes.ResolvedPodUID]kubelettypes.MirrorPodUID)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 map[kubelettypes.MirrorPodUID]kubelettypes.ResolvedPodUID
|
||||
if rf, ok := ret.Get(1).(func() map[kubelettypes.MirrorPodUID]kubelettypes.ResolvedPodUID); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).(map[kubelettypes.MirrorPodUID]kubelettypes.ResolvedPodUID)
|
||||
}
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// IsMirrorPodOf provides a mock function with given fields: mirrorPod, _a1
|
||||
func (_m *MockManager) IsMirrorPodOf(mirrorPod *v1.Pod, _a1 *v1.Pod) bool {
|
||||
ret := _m.Called(mirrorPod, _a1)
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func(*v1.Pod, *v1.Pod) bool); ok {
|
||||
r0 = rf(mirrorPod, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// SetPods provides a mock function with given fields: pods
|
||||
func (_m *MockManager) SetPods(pods []*v1.Pod) {
|
||||
_m.Called(pods)
|
||||
}
|
||||
|
||||
// TranslatePodUID provides a mock function with given fields: uid
|
||||
func (_m *MockManager) TranslatePodUID(uid types.UID) kubelettypes.ResolvedPodUID {
|
||||
ret := _m.Called(uid)
|
||||
|
||||
var r0 kubelettypes.ResolvedPodUID
|
||||
if rf, ok := ret.Get(0).(func(types.UID) kubelettypes.ResolvedPodUID); ok {
|
||||
r0 = rf(uid)
|
||||
} else {
|
||||
r0 = ret.Get(0).(kubelettypes.ResolvedPodUID)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// UpdatePod provides a mock function with given fields: _a0
|
||||
func (_m *MockManager) UpdatePod(_a0 *v1.Pod) {
|
||||
_m.Called(_a0)
|
||||
}
|
|
@ -113,7 +113,7 @@ func TestRunOnce(t *testing.T) {
|
|||
kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), kubeletconfig.HairpinNone, "", network.UseDefaultMTU)
|
||||
// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
|
||||
volumeStatsAggPeriod := time.Second * 10
|
||||
kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)
|
||||
kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod)
|
||||
nodeRef := &v1.ObjectReference{
|
||||
Kind: "Node",
|
||||
Name: string(kb.nodeName),
|
||||
|
|
|
@ -16,7 +16,6 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1/validation:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/server/portforward:go_default_library",
|
||||
"//pkg/kubelet/server/remotecommand:go_default_library",
|
||||
|
@ -25,11 +24,9 @@ go_library(
|
|||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/util/configz:go_default_library",
|
||||
"//pkg/util/limitwriter:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//vendor/github.com/emicklei/go-restful:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v2:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/metrics:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library",
|
||||
|
@ -61,6 +58,7 @@ go_test(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/install:go_default_library",
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/container/testing:go_default_library",
|
||||
|
@ -69,7 +67,6 @@ go_test(
|
|||
"//pkg/kubelet/server/stats:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v2:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/golang.org/x/net/websocket:go_default_library",
|
||||
|
|
|
@ -33,7 +33,6 @@ import (
|
|||
restful "github.com/emicklei/go-restful"
|
||||
"github.com/golang/glog"
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"github.com/google/cadvisor/metrics"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
@ -53,7 +52,6 @@ import (
|
|||
"k8s.io/client-go/tools/remotecommand"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1/validation"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/portforward"
|
||||
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
|
||||
|
@ -62,7 +60,6 @@ import (
|
|||
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/util/configz"
|
||||
"k8s.io/kubernetes/pkg/util/limitwriter"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -169,14 +166,10 @@ type AuthInterface interface {
|
|||
// HostInterface contains all the kubelet methods required by the server.
|
||||
// For testability.
|
||||
type HostInterface interface {
|
||||
GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
|
||||
GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error)
|
||||
GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error)
|
||||
stats.StatsProvider
|
||||
GetVersionInfo() (*cadvisorapi.VersionInfo, error)
|
||||
GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error)
|
||||
GetPods() []*v1.Pod
|
||||
GetRunningPods() ([]*v1.Pod, error)
|
||||
GetPodByName(namespace, name string) (*v1.Pod, bool)
|
||||
RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error)
|
||||
ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error
|
||||
AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error
|
||||
|
@ -186,12 +179,7 @@ type HostInterface interface {
|
|||
StreamingConnectionIdleTimeout() time.Duration
|
||||
ResyncInterval() time.Duration
|
||||
GetHostname() string
|
||||
GetNode() (*v1.Node, error)
|
||||
GetNodeConfig() cm.NodeConfig
|
||||
LatestLoopEntryTime() time.Time
|
||||
ImagesFsInfo() (cadvisorapiv2.FsInfo, error)
|
||||
RootFsInfo() (cadvisorapiv2.FsInfo, error)
|
||||
ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool)
|
||||
GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error)
|
||||
GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error)
|
||||
GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error)
|
||||
|
|
|
@ -35,7 +35,6 @@ import (
|
|||
"time"
|
||||
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/api/core/v1"
|
||||
|
@ -49,6 +48,7 @@ import (
|
|||
"k8s.io/client-go/tools/remotecommand"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
// Do some initialization to decode the query parameters correctly.
|
||||
_ "k8s.io/kubernetes/pkg/api/install"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
|
@ -166,18 +166,6 @@ func (fk *fakeKubelet) StreamingConnectionIdleTimeout() time.Duration {
|
|||
}
|
||||
|
||||
// Unused functions
|
||||
func (_ *fakeKubelet) GetContainerInfoV2(_ string, _ cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (_ *fakeKubelet) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
|
||||
return cadvisorapiv2.FsInfo{}, fmt.Errorf("Unsupported Operation ImagesFsInfo")
|
||||
}
|
||||
|
||||
func (_ *fakeKubelet) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
|
||||
return cadvisorapiv2.FsInfo{}, fmt.Errorf("Unsupport Operation RootFsInfo")
|
||||
}
|
||||
|
||||
func (_ *fakeKubelet) GetNode() (*v1.Node, error) { return nil, nil }
|
||||
func (_ *fakeKubelet) GetNodeConfig() cm.NodeConfig { return cm.NodeConfig{} }
|
||||
|
||||
|
@ -185,6 +173,13 @@ func (fk *fakeKubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Vo
|
|||
return map[string]volume.Volume{}, true
|
||||
}
|
||||
|
||||
func (_ *fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil }
|
||||
func (_ *fakeKubelet) ListPodStats() ([]statsapi.PodStats, error) { return nil, nil }
|
||||
func (_ *fakeKubelet) ImageFsStats() (*statsapi.FsStats, error) { return nil, nil }
|
||||
func (_ *fakeKubelet) GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
type fakeAuth struct {
|
||||
authenticateFunc func(*http.Request) (user.Info, bool, error)
|
||||
attributesFunc func(user.Info, *http.Request) authorizer.Attributes
|
||||
|
@ -242,7 +237,7 @@ func newServerTest() *serverTestFramework {
|
|||
}
|
||||
server := NewServer(
|
||||
fw.fakeKubelet,
|
||||
stats.NewResourceAnalyzer(fw.fakeKubelet, time.Minute, &kubecontainertesting.FakeRuntime{}),
|
||||
stats.NewResourceAnalyzer(fw.fakeKubelet, time.Minute),
|
||||
fw.fakeAuth,
|
||||
true,
|
||||
false,
|
||||
|
|
|
@ -1,10 +1,4 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
|
@ -16,19 +10,16 @@ go_library(
|
|||
"summary.go",
|
||||
"volume_stat_calculator.go",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/leaky:go_default_library",
|
||||
"//pkg/kubelet/network:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/kubelet/util/format:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//vendor/github.com/emicklei/go-restful:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v2:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
|
@ -38,25 +29,16 @@ go_library(
|
|||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"mocks_test.go",
|
||||
"summary_test.go",
|
||||
],
|
||||
srcs = ["summary_test.go"],
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/leaky:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/info/v2:go_default_library",
|
||||
"//pkg/kubelet/server/stats/testing:go_default_library",
|
||||
"//vendor/github.com/google/gofuzz:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/mock:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -69,6 +51,10 @@ filegroup(
|
|||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/kubelet/server/stats/testing:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
|
|
@ -24,14 +24,14 @@ import (
|
|||
"path"
|
||||
"time"
|
||||
|
||||
restful "github.com/emicklei/go-restful"
|
||||
"github.com/golang/glog"
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
|
||||
"github.com/emicklei/go-restful"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
|
@ -39,15 +39,44 @@ import (
|
|||
|
||||
// Host methods required by stats handlers.
|
||||
type StatsProvider interface {
|
||||
// The following stats are provided by either CRI or cAdvisor.
|
||||
//
|
||||
// ListPodStats returns the stats of all the containers managed by pods.
|
||||
ListPodStats() ([]statsapi.PodStats, error)
|
||||
// ImageFsStats returns the stats of the image filesystem.
|
||||
ImageFsStats() (*statsapi.FsStats, error)
|
||||
|
||||
// The following stats are provided by cAdvisor.
|
||||
//
|
||||
// GetCgroupStats returns the stats and the networking usage of the cgroup
|
||||
// with the specified cgroupName.
|
||||
GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error)
|
||||
// RootFsStats returns the stats of the node root filesystem.
|
||||
RootFsStats() (*statsapi.FsStats, error)
|
||||
|
||||
// The following stats are provided by cAdvisor for legacy usage.
|
||||
//
|
||||
// GetContainerInfo returns the information of the container with the
|
||||
// containerName managed by the pod with the uid.
|
||||
GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
|
||||
GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error)
|
||||
// GetRawContainerInfo returns the information of the container with the
|
||||
// containerName. If subcontainers is true, this function will return the
|
||||
// information of all the sub-containers as well.
|
||||
GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error)
|
||||
|
||||
// The following information is provided by Kubelet.
|
||||
//
|
||||
// GetPodByName returns the spec of the pod with the name in the specified
|
||||
// namespace.
|
||||
GetPodByName(namespace, name string) (*v1.Pod, bool)
|
||||
// GetNode returns the spec of the local node.
|
||||
GetNode() (*v1.Node, error)
|
||||
// GetNodeConfig returns the configuration of the local node.
|
||||
GetNodeConfig() cm.NodeConfig
|
||||
ImagesFsInfo() (cadvisorapiv2.FsInfo, error)
|
||||
RootFsInfo() (cadvisorapiv2.FsInfo, error)
|
||||
// ListVolumesForPod returns the stats of the volume used by the pod with
|
||||
// the podUID.
|
||||
ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool)
|
||||
// GetPods returns the specs of all the pods running on this node.
|
||||
GetPods() []*v1.Pod
|
||||
}
|
||||
|
||||
|
|
|
@ -1,244 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
// DO NOT EDIT
|
||||
// GENERATED BY mockery
|
||||
|
||||
type MockStatsProvider struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// GetContainerInfo provides a mock function with given fields: podFullName, uid, containerName, req
|
||||
func (_m *MockStatsProvider) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
|
||||
ret := _m.Called(podFullName, uid, containerName, req)
|
||||
|
||||
var r0 *cadvisorapi.ContainerInfo
|
||||
if rf, ok := ret.Get(0).(func(string, types.UID, string, *cadvisorapi.ContainerInfoRequest) *cadvisorapi.ContainerInfo); ok {
|
||||
r0 = rf(podFullName, uid, containerName, req)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*cadvisorapi.ContainerInfo)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, types.UID, string, *cadvisorapi.ContainerInfoRequest) error); ok {
|
||||
r1 = rf(podFullName, uid, containerName, req)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetContainerInfoV2 provides a mock function with given fields: name, options
|
||||
func (_m *MockStatsProvider) GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
|
||||
ret := _m.Called(name, options)
|
||||
|
||||
var r0 map[string]cadvisorapiv2.ContainerInfo
|
||||
if rf, ok := ret.Get(0).(func(string, cadvisorapiv2.RequestOptions) map[string]cadvisorapiv2.ContainerInfo); ok {
|
||||
r0 = rf(name, options)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(map[string]cadvisorapiv2.ContainerInfo)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, cadvisorapiv2.RequestOptions) error); ok {
|
||||
r1 = rf(name, options)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetRawContainerInfo provides a mock function with given fields: containerName, req, subcontainers
|
||||
func (_m *MockStatsProvider) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) {
|
||||
ret := _m.Called(containerName, req, subcontainers)
|
||||
|
||||
var r0 map[string]*cadvisorapi.ContainerInfo
|
||||
if rf, ok := ret.Get(0).(func(string, *cadvisorapi.ContainerInfoRequest, bool) map[string]*cadvisorapi.ContainerInfo); ok {
|
||||
r0 = rf(containerName, req, subcontainers)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(map[string]*cadvisorapi.ContainerInfo)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, *cadvisorapi.ContainerInfoRequest, bool) error); ok {
|
||||
r1 = rf(containerName, req, subcontainers)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetPodByName provides a mock function with given fields: namespace, name
|
||||
func (_m *MockStatsProvider) GetPodByName(namespace string, name string) (*v1.Pod, bool) {
|
||||
ret := _m.Called(namespace, name)
|
||||
|
||||
var r0 *v1.Pod
|
||||
if rf, ok := ret.Get(0).(func(string, string) *v1.Pod); ok {
|
||||
r0 = rf(namespace, name)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 bool
|
||||
if rf, ok := ret.Get(1).(func(string, string) bool); ok {
|
||||
r1 = rf(namespace, name)
|
||||
} else {
|
||||
r1 = ret.Get(1).(bool)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetNode provides a mock function with given fields:
|
||||
func (_m *MockStatsProvider) GetNode() (*v1.Node, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 *v1.Node
|
||||
if rf, ok := ret.Get(0).(func() *v1.Node); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1.Node)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetNodeConfig provides a mock function with given fields:
|
||||
func (_m *MockStatsProvider) GetNodeConfig() cm.NodeConfig {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 cm.NodeConfig
|
||||
if rf, ok := ret.Get(0).(func() cm.NodeConfig); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(cm.NodeConfig)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ImagesFsInfo provides a mock function with given fields:
|
||||
func (_m *MockStatsProvider) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 cadvisorapiv2.FsInfo
|
||||
if rf, ok := ret.Get(0).(func() cadvisorapiv2.FsInfo); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(cadvisorapiv2.FsInfo)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// RootFsInfo provides a mock function with given fields:
|
||||
func (_m *MockStatsProvider) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 cadvisorapiv2.FsInfo
|
||||
if rf, ok := ret.Get(0).(func() cadvisorapiv2.FsInfo); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(cadvisorapiv2.FsInfo)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ListVolumesForPod provides a mock function with given fields: podUID
|
||||
func (_m *MockStatsProvider) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
|
||||
ret := _m.Called(podUID)
|
||||
|
||||
var r0 map[string]volume.Volume
|
||||
if rf, ok := ret.Get(0).(func(types.UID) map[string]volume.Volume); ok {
|
||||
r0 = rf(podUID)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(map[string]volume.Volume)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 bool
|
||||
if rf, ok := ret.Get(1).(func(types.UID) bool); ok {
|
||||
r1 = rf(podUID)
|
||||
} else {
|
||||
r1 = ret.Get(1).(bool)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetPods provides a mock function with given fields:
|
||||
func (_m *MockStatsProvider) GetPods() []*v1.Pod {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 []*v1.Pod
|
||||
if rf, ok := ret.Get(0).(func() []*v1.Pod); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*v1.Pod)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
|
@ -18,8 +18,6 @@ package stats
|
|||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
// ResourceAnalyzer provides statistics on node resource consumption
|
||||
|
@ -39,9 +37,9 @@ type resourceAnalyzer struct {
|
|||
var _ ResourceAnalyzer = &resourceAnalyzer{}
|
||||
|
||||
// NewResourceAnalyzer returns a new ResourceAnalyzer
|
||||
func NewResourceAnalyzer(statsProvider StatsProvider, calVolumeFrequency time.Duration, runtime container.Runtime) ResourceAnalyzer {
|
||||
func NewResourceAnalyzer(statsProvider StatsProvider, calVolumeFrequency time.Duration) ResourceAnalyzer {
|
||||
fsAnalyzer := newFsResourceAnalyzer(statsProvider, calVolumeFrequency)
|
||||
summaryProvider := NewSummaryProvider(statsProvider, fsAnalyzer, runtime)
|
||||
summaryProvider := NewSummaryProvider(statsProvider)
|
||||
return &resourceAnalyzer{fsAnalyzer, summaryProvider}
|
||||
}
|
||||
|
||||
|
|
|
@ -18,513 +18,85 @@ package stats
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubetypes "k8s.io/apimachinery/pkg/types"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/leaky"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
)
|
||||
|
||||
type SummaryProvider interface {
|
||||
// Get provides a new Summary using the latest results from cadvisor
|
||||
Get() (*stats.Summary, error)
|
||||
Get() (*statsapi.Summary, error)
|
||||
}
|
||||
|
||||
// summaryProviderImpl implements the SummaryProvider interface.
|
||||
type summaryProviderImpl struct {
|
||||
provider StatsProvider
|
||||
fsResourceAnalyzer fsResourceAnalyzerInterface
|
||||
runtime container.Runtime
|
||||
provider StatsProvider
|
||||
}
|
||||
|
||||
var _ SummaryProvider = &summaryProviderImpl{}
|
||||
|
||||
// NewSummaryProvider returns a new SummaryProvider
|
||||
func NewSummaryProvider(statsProvider StatsProvider, fsResourceAnalyzer fsResourceAnalyzerInterface, cruntime container.Runtime) SummaryProvider {
|
||||
return &summaryProviderImpl{statsProvider, fsResourceAnalyzer, cruntime}
|
||||
// NewSummaryProvider returns a SummaryProvider using the stats provided by the
|
||||
// specified statsProvider.
|
||||
func NewSummaryProvider(statsProvider StatsProvider) SummaryProvider {
|
||||
return &summaryProviderImpl{statsProvider}
|
||||
}
|
||||
|
||||
// Get implements the SummaryProvider interface
|
||||
// Query cadvisor for the latest resource metrics and build into a summary
|
||||
func (sp *summaryProviderImpl) Get() (*stats.Summary, error) {
|
||||
options := cadvisorapiv2.RequestOptions{
|
||||
IdType: cadvisorapiv2.TypeName,
|
||||
Count: 2, // 2 samples are needed to compute "instantaneous" CPU
|
||||
Recursive: true,
|
||||
}
|
||||
infos, err := sp.provider.GetContainerInfoV2("/", options)
|
||||
if err != nil {
|
||||
if _, ok := infos["/"]; ok {
|
||||
// If the failure is partial, log it and return a best-effort response.
|
||||
glog.Errorf("Partial failure issuing GetContainerInfoV2: %v", err)
|
||||
} else {
|
||||
return nil, fmt.Errorf("failed GetContainerInfoV2: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(tallclair): Consider returning a best-effort response if any of the following errors
|
||||
// occur.
|
||||
// Get provides a new Summary with the stats from Kubelet.
|
||||
func (sp *summaryProviderImpl) Get() (*statsapi.Summary, error) {
|
||||
// TODO(timstclair): Consider returning a best-effort response if any of
|
||||
// the following errors occur.
|
||||
node, err := sp.provider.GetNode()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed GetNode: %v", err)
|
||||
return nil, fmt.Errorf("failed to get node info: %v", err)
|
||||
}
|
||||
|
||||
nodeConfig := sp.provider.GetNodeConfig()
|
||||
rootFsInfo, err := sp.provider.RootFsInfo()
|
||||
rootStats, networkStats, err := sp.provider.GetCgroupStats("/")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed RootFsInfo: %v", err)
|
||||
return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
|
||||
}
|
||||
imageFsInfo, err := sp.provider.ImagesFsInfo()
|
||||
rootFsStats, err := sp.provider.RootFsStats()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed DockerImagesFsInfo: %v", err)
|
||||
return nil, fmt.Errorf("failed to get rootFs stats: %v", err)
|
||||
}
|
||||
imageStats, err := sp.runtime.ImageStats()
|
||||
if err != nil || imageStats == nil {
|
||||
return nil, fmt.Errorf("failed ImageStats: %v", err)
|
||||
imageFsStats, err := sp.provider.ImageFsStats()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get imageFs stats: %v", err)
|
||||
}
|
||||
sb := &summaryBuilder{sp.fsResourceAnalyzer, node, nodeConfig, rootFsInfo, imageFsInfo, *imageStats, infos}
|
||||
return sb.build()
|
||||
}
|
||||
|
||||
// summaryBuilder aggregates the datastructures provided by cadvisor into a Summary result
|
||||
type summaryBuilder struct {
|
||||
fsResourceAnalyzer fsResourceAnalyzerInterface
|
||||
node *v1.Node
|
||||
nodeConfig cm.NodeConfig
|
||||
rootFsInfo cadvisorapiv2.FsInfo
|
||||
imageFsInfo cadvisorapiv2.FsInfo
|
||||
imageStats container.ImageStats
|
||||
infos map[string]cadvisorapiv2.ContainerInfo
|
||||
}
|
||||
|
||||
// build returns a Summary from aggregating the input data
|
||||
func (sb *summaryBuilder) build() (*stats.Summary, error) {
|
||||
rootInfo, found := sb.infos["/"]
|
||||
if !found {
|
||||
return nil, fmt.Errorf("Missing stats for root container")
|
||||
podStats, err := sp.provider.ListPodStats()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list pod stats: %v", err)
|
||||
}
|
||||
|
||||
var nodeFsInodesUsed *uint64
|
||||
if sb.rootFsInfo.Inodes != nil && sb.rootFsInfo.InodesFree != nil {
|
||||
nodeFsIU := *sb.rootFsInfo.Inodes - *sb.rootFsInfo.InodesFree
|
||||
nodeFsInodesUsed = &nodeFsIU
|
||||
}
|
||||
|
||||
var imageFsInodesUsed *uint64
|
||||
if sb.imageFsInfo.Inodes != nil && sb.imageFsInfo.InodesFree != nil {
|
||||
imageFsIU := *sb.imageFsInfo.Inodes - *sb.imageFsInfo.InodesFree
|
||||
imageFsInodesUsed = &imageFsIU
|
||||
}
|
||||
|
||||
rootStats := sb.containerInfoV2ToStats("", &rootInfo)
|
||||
cStats, _ := latestContainerStats(&rootInfo)
|
||||
nodeStats := stats.NodeStats{
|
||||
NodeName: sb.node.Name,
|
||||
CPU: rootStats.CPU,
|
||||
Memory: rootStats.Memory,
|
||||
Network: sb.containerInfoV2ToNetworkStats("node:"+sb.node.Name, &rootInfo),
|
||||
Fs: &stats.FsStats{
|
||||
Time: metav1.NewTime(cStats.Timestamp),
|
||||
AvailableBytes: &sb.rootFsInfo.Available,
|
||||
CapacityBytes: &sb.rootFsInfo.Capacity,
|
||||
UsedBytes: &sb.rootFsInfo.Usage,
|
||||
InodesFree: sb.rootFsInfo.InodesFree,
|
||||
Inodes: sb.rootFsInfo.Inodes,
|
||||
InodesUsed: nodeFsInodesUsed,
|
||||
},
|
||||
nodeStats := statsapi.NodeStats{
|
||||
NodeName: node.Name,
|
||||
CPU: rootStats.CPU,
|
||||
Memory: rootStats.Memory,
|
||||
Network: networkStats,
|
||||
StartTime: rootStats.StartTime,
|
||||
Runtime: &stats.RuntimeStats{
|
||||
ImageFs: &stats.FsStats{
|
||||
Time: metav1.NewTime(cStats.Timestamp),
|
||||
AvailableBytes: &sb.imageFsInfo.Available,
|
||||
CapacityBytes: &sb.imageFsInfo.Capacity,
|
||||
UsedBytes: &sb.imageStats.TotalStorageBytes,
|
||||
InodesFree: sb.imageFsInfo.InodesFree,
|
||||
Inodes: sb.imageFsInfo.Inodes,
|
||||
InodesUsed: imageFsInodesUsed,
|
||||
},
|
||||
},
|
||||
Fs: rootFsStats,
|
||||
Runtime: &statsapi.RuntimeStats{ImageFs: imageFsStats},
|
||||
}
|
||||
|
||||
systemContainers := map[string]string{
|
||||
stats.SystemContainerKubelet: sb.nodeConfig.KubeletCgroupsName,
|
||||
stats.SystemContainerRuntime: sb.nodeConfig.RuntimeCgroupsName,
|
||||
stats.SystemContainerMisc: sb.nodeConfig.SystemCgroupsName,
|
||||
statsapi.SystemContainerKubelet: nodeConfig.KubeletCgroupsName,
|
||||
statsapi.SystemContainerRuntime: nodeConfig.RuntimeCgroupsName,
|
||||
statsapi.SystemContainerMisc: nodeConfig.SystemCgroupsName,
|
||||
}
|
||||
for sys, name := range systemContainers {
|
||||
if info, ok := sb.infos[name]; ok {
|
||||
sysCont := sb.containerInfoV2ToStats(sys, &info)
|
||||
// System containers don't have a filesystem associated with them.
|
||||
sysCont.Rootfs = nil
|
||||
sysCont.Logs = nil
|
||||
nodeStats.SystemContainers = append(nodeStats.SystemContainers, sysCont)
|
||||
s, _, err := sp.provider.GetCgroupStats(name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get system container stats for %q: %v", name, err)
|
||||
continue
|
||||
}
|
||||
// System containers don't have a filesystem associated with them.
|
||||
s.Logs, s.Rootfs = nil, nil
|
||||
s.Name = sys
|
||||
nodeStats.SystemContainers = append(nodeStats.SystemContainers, *s)
|
||||
}
|
||||
|
||||
summary := stats.Summary{
|
||||
summary := statsapi.Summary{
|
||||
Node: nodeStats,
|
||||
Pods: sb.buildSummaryPods(),
|
||||
Pods: podStats,
|
||||
}
|
||||
return &summary, nil
|
||||
}
|
||||
|
||||
// containerInfoV2FsStats populates the container fs stats
|
||||
func (sb *summaryBuilder) containerInfoV2FsStats(
|
||||
info *cadvisorapiv2.ContainerInfo,
|
||||
cs *stats.ContainerStats) {
|
||||
|
||||
lcs, found := latestContainerStats(info)
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
// The container logs live on the node rootfs device
|
||||
cs.Logs = &stats.FsStats{
|
||||
Time: metav1.NewTime(lcs.Timestamp),
|
||||
AvailableBytes: &sb.rootFsInfo.Available,
|
||||
CapacityBytes: &sb.rootFsInfo.Capacity,
|
||||
InodesFree: sb.rootFsInfo.InodesFree,
|
||||
Inodes: sb.rootFsInfo.Inodes,
|
||||
}
|
||||
|
||||
if sb.rootFsInfo.Inodes != nil && sb.rootFsInfo.InodesFree != nil {
|
||||
logsInodesUsed := *sb.rootFsInfo.Inodes - *sb.rootFsInfo.InodesFree
|
||||
cs.Logs.InodesUsed = &logsInodesUsed
|
||||
}
|
||||
|
||||
// The container rootFs lives on the imageFs devices (which may not be the node root fs)
|
||||
cs.Rootfs = &stats.FsStats{
|
||||
Time: metav1.NewTime(lcs.Timestamp),
|
||||
AvailableBytes: &sb.imageFsInfo.Available,
|
||||
CapacityBytes: &sb.imageFsInfo.Capacity,
|
||||
InodesFree: sb.imageFsInfo.InodesFree,
|
||||
Inodes: sb.imageFsInfo.Inodes,
|
||||
}
|
||||
cfs := lcs.Filesystem
|
||||
|
||||
if cfs != nil {
|
||||
if cfs.BaseUsageBytes != nil {
|
||||
rootfsUsage := *cfs.BaseUsageBytes
|
||||
cs.Rootfs.UsedBytes = &rootfsUsage
|
||||
if cfs.TotalUsageBytes != nil {
|
||||
logsUsage := *cfs.TotalUsageBytes - *cfs.BaseUsageBytes
|
||||
cs.Logs.UsedBytes = &logsUsage
|
||||
}
|
||||
}
|
||||
if cfs.InodeUsage != nil {
|
||||
rootInodes := *cfs.InodeUsage
|
||||
cs.Rootfs.InodesUsed = &rootInodes
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// latestContainerStats returns the latest container stats from cadvisor, or nil if none exist
|
||||
func latestContainerStats(info *cadvisorapiv2.ContainerInfo) (*cadvisorapiv2.ContainerStats, bool) {
|
||||
stats := info.Stats
|
||||
if len(stats) < 1 {
|
||||
return nil, false
|
||||
}
|
||||
latest := stats[len(stats)-1]
|
||||
if latest == nil {
|
||||
return nil, false
|
||||
}
|
||||
return latest, true
|
||||
}
|
||||
|
||||
// hasMemoryAndCPUInstUsage returns true if the specified container info has
|
||||
// both non-zero CPU instantaneous usage and non-zero memory RSS usage, and
|
||||
// false otherwise.
|
||||
func hasMemoryAndCPUInstUsage(info *cadvisorapiv2.ContainerInfo) bool {
|
||||
if !info.Spec.HasCpu || !info.Spec.HasMemory {
|
||||
return false
|
||||
}
|
||||
cstat, found := latestContainerStats(info)
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
if cstat.CpuInst == nil {
|
||||
return false
|
||||
}
|
||||
return cstat.CpuInst.Usage.Total != 0 && cstat.Memory.RSS != 0
|
||||
}
|
||||
|
||||
// ByCreationTime implements sort.Interface for []containerInfoWithCgroup based
|
||||
// on the cinfo.Spec.CreationTime field.
|
||||
type ByCreationTime []containerInfoWithCgroup
|
||||
|
||||
func (a ByCreationTime) Len() int { return len(a) }
|
||||
func (a ByCreationTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByCreationTime) Less(i, j int) bool {
|
||||
if a[i].cinfo.Spec.CreationTime.Equal(a[j].cinfo.Spec.CreationTime) {
|
||||
// There shouldn't be two containers with the same name and/or the same
|
||||
// creation time. However, to make the logic here robust, we break the
|
||||
// tie by moving the one without CPU instantaneous or memory RSS usage
|
||||
// to the beginning.
|
||||
return hasMemoryAndCPUInstUsage(&a[j].cinfo)
|
||||
}
|
||||
return a[i].cinfo.Spec.CreationTime.Before(a[j].cinfo.Spec.CreationTime)
|
||||
}
|
||||
|
||||
// containerID is the identity of a container in a pod.
|
||||
type containerID struct {
|
||||
podRef stats.PodReference
|
||||
containerName string
|
||||
}
|
||||
|
||||
// containerInfoWithCgroup contains the ContainerInfo and its cgroup name.
|
||||
type containerInfoWithCgroup struct {
|
||||
cinfo cadvisorapiv2.ContainerInfo
|
||||
cgroup string
|
||||
}
|
||||
|
||||
// removeTerminatedContainerInfo returns the specified containerInfo but with
|
||||
// the stats of the terminated containers removed.
|
||||
//
|
||||
// A ContainerInfo is considered to be of a terminated container if it has an
|
||||
// older CreationTime and zero CPU instantaneous and memory RSS usage.
|
||||
func removeTerminatedContainerInfo(containerInfo map[string]cadvisorapiv2.ContainerInfo) map[string]cadvisorapiv2.ContainerInfo {
|
||||
cinfoMap := make(map[containerID][]containerInfoWithCgroup)
|
||||
for key, cinfo := range containerInfo {
|
||||
if !isPodManagedContainer(&cinfo) {
|
||||
continue
|
||||
}
|
||||
cinfoID := containerID{
|
||||
podRef: buildPodRef(&cinfo),
|
||||
containerName: types.GetContainerName(cinfo.Spec.Labels),
|
||||
}
|
||||
cinfoMap[cinfoID] = append(cinfoMap[cinfoID], containerInfoWithCgroup{
|
||||
cinfo: cinfo,
|
||||
cgroup: key,
|
||||
})
|
||||
}
|
||||
result := make(map[string]cadvisorapiv2.ContainerInfo)
|
||||
for _, refs := range cinfoMap {
|
||||
if len(refs) == 1 {
|
||||
result[refs[0].cgroup] = refs[0].cinfo
|
||||
continue
|
||||
}
|
||||
sort.Sort(ByCreationTime(refs))
|
||||
i := 0
|
||||
for ; i < len(refs); i++ {
|
||||
if hasMemoryAndCPUInstUsage(&refs[i].cinfo) {
|
||||
// Stops removing when we first see an info with non-zero
|
||||
// CPU/Memory usage.
|
||||
break
|
||||
}
|
||||
}
|
||||
for ; i < len(refs); i++ {
|
||||
result[refs[i].cgroup] = refs[i].cinfo
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// buildSummaryPods aggregates and returns the container stats in cinfos by the Pod managing the container.
|
||||
// Containers not managed by a Pod are omitted.
|
||||
func (sb *summaryBuilder) buildSummaryPods() []stats.PodStats {
|
||||
// Map each container to a pod and update the PodStats with container data
|
||||
podToStats := map[stats.PodReference]*stats.PodStats{}
|
||||
infos := removeTerminatedContainerInfo(sb.infos)
|
||||
for key, cinfo := range infos {
|
||||
// on systemd using devicemapper each mount into the container has an associated cgroup.
|
||||
// we ignore them to ensure we do not get duplicate entries in our summary.
|
||||
// for details on .mount units: http://man7.org/linux/man-pages/man5/systemd.mount.5.html
|
||||
if strings.HasSuffix(key, ".mount") {
|
||||
continue
|
||||
}
|
||||
// Build the Pod key if this container is managed by a Pod
|
||||
if !isPodManagedContainer(&cinfo) {
|
||||
continue
|
||||
}
|
||||
ref := buildPodRef(&cinfo)
|
||||
|
||||
// Lookup the PodStats for the pod using the PodRef. If none exists, initialize a new entry.
|
||||
podStats, found := podToStats[ref]
|
||||
if !found {
|
||||
podStats = &stats.PodStats{PodRef: ref}
|
||||
podToStats[ref] = podStats
|
||||
}
|
||||
|
||||
// Update the PodStats entry with the stats from the container by adding it to stats.Containers
|
||||
containerName := types.GetContainerName(cinfo.Spec.Labels)
|
||||
if containerName == leaky.PodInfraContainerName {
|
||||
// Special case for infrastructure container which is hidden from the user and has network stats
|
||||
podStats.Network = sb.containerInfoV2ToNetworkStats("pod:"+ref.Namespace+"_"+ref.Name, &cinfo)
|
||||
podStats.StartTime = metav1.NewTime(cinfo.Spec.CreationTime)
|
||||
} else {
|
||||
podStats.Containers = append(podStats.Containers, sb.containerInfoV2ToStats(containerName, &cinfo))
|
||||
}
|
||||
}
|
||||
|
||||
// Add each PodStats to the result
|
||||
result := make([]stats.PodStats, 0, len(podToStats))
|
||||
for _, podStats := range podToStats {
|
||||
// Lookup the volume stats for each pod
|
||||
podUID := kubetypes.UID(podStats.PodRef.UID)
|
||||
if vstats, found := sb.fsResourceAnalyzer.GetPodVolumeStats(podUID); found {
|
||||
podStats.VolumeStats = vstats.Volumes
|
||||
}
|
||||
result = append(result, *podStats)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// buildPodRef returns a PodReference that identifies the Pod managing cinfo
|
||||
func buildPodRef(cinfo *cadvisorapiv2.ContainerInfo) stats.PodReference {
|
||||
podName := types.GetPodName(cinfo.Spec.Labels)
|
||||
podNamespace := types.GetPodNamespace(cinfo.Spec.Labels)
|
||||
podUID := types.GetPodUID(cinfo.Spec.Labels)
|
||||
return stats.PodReference{Name: podName, Namespace: podNamespace, UID: podUID}
|
||||
}
|
||||
|
||||
// isPodManagedContainer returns true if the cinfo container is managed by a Pod
|
||||
func isPodManagedContainer(cinfo *cadvisorapiv2.ContainerInfo) bool {
|
||||
podName := types.GetPodName(cinfo.Spec.Labels)
|
||||
podNamespace := types.GetPodNamespace(cinfo.Spec.Labels)
|
||||
managed := podName != "" && podNamespace != ""
|
||||
if !managed && podName != podNamespace {
|
||||
glog.Warningf(
|
||||
"Expect container to have either both podName (%s) and podNamespace (%s) labels, or neither.",
|
||||
podName, podNamespace)
|
||||
}
|
||||
return managed
|
||||
}
|
||||
|
||||
func (sb *summaryBuilder) containerInfoV2ToStats(
|
||||
name string,
|
||||
info *cadvisorapiv2.ContainerInfo) stats.ContainerStats {
|
||||
cStats := stats.ContainerStats{
|
||||
StartTime: metav1.NewTime(info.Spec.CreationTime),
|
||||
Name: name,
|
||||
}
|
||||
cstat, found := latestContainerStats(info)
|
||||
if !found {
|
||||
return cStats
|
||||
}
|
||||
if info.Spec.HasCpu {
|
||||
cpuStats := stats.CPUStats{
|
||||
Time: metav1.NewTime(cstat.Timestamp),
|
||||
}
|
||||
if cstat.CpuInst != nil {
|
||||
cpuStats.UsageNanoCores = &cstat.CpuInst.Usage.Total
|
||||
}
|
||||
if cstat.Cpu != nil {
|
||||
cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total
|
||||
}
|
||||
cStats.CPU = &cpuStats
|
||||
}
|
||||
if info.Spec.HasMemory {
|
||||
pageFaults := cstat.Memory.ContainerData.Pgfault
|
||||
majorPageFaults := cstat.Memory.ContainerData.Pgmajfault
|
||||
cStats.Memory = &stats.MemoryStats{
|
||||
Time: metav1.NewTime(cstat.Timestamp),
|
||||
UsageBytes: &cstat.Memory.Usage,
|
||||
WorkingSetBytes: &cstat.Memory.WorkingSet,
|
||||
RSSBytes: &cstat.Memory.RSS,
|
||||
PageFaults: &pageFaults,
|
||||
MajorPageFaults: &majorPageFaults,
|
||||
}
|
||||
// availableBytes = memory limit (if known) - workingset
|
||||
if !isMemoryUnlimited(info.Spec.Memory.Limit) {
|
||||
availableBytes := info.Spec.Memory.Limit - cstat.Memory.WorkingSet
|
||||
cStats.Memory.AvailableBytes = &availableBytes
|
||||
}
|
||||
}
|
||||
|
||||
sb.containerInfoV2FsStats(info, &cStats)
|
||||
cStats.UserDefinedMetrics = sb.containerInfoV2ToUserDefinedMetrics(info)
|
||||
return cStats
|
||||
}
|
||||
|
||||
// Size after which we consider memory to be "unlimited". This is not
|
||||
// MaxInt64 due to rounding by the kernel.
|
||||
// TODO: cadvisor should export this https://github.com/google/cadvisor/blob/master/metrics/prometheus.go#L596
|
||||
const maxMemorySize = uint64(1 << 62)
|
||||
|
||||
func isMemoryUnlimited(v uint64) bool {
|
||||
return v > maxMemorySize
|
||||
}
|
||||
|
||||
func (sb *summaryBuilder) containerInfoV2ToNetworkStats(name string, info *cadvisorapiv2.ContainerInfo) *stats.NetworkStats {
|
||||
if !info.Spec.HasNetwork {
|
||||
return nil
|
||||
}
|
||||
cstat, found := latestContainerStats(info)
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
for _, inter := range cstat.Network.Interfaces {
|
||||
if inter.Name == network.DefaultInterfaceName {
|
||||
return &stats.NetworkStats{
|
||||
Time: metav1.NewTime(cstat.Timestamp),
|
||||
RxBytes: &inter.RxBytes,
|
||||
RxErrors: &inter.RxErrors,
|
||||
TxBytes: &inter.TxBytes,
|
||||
TxErrors: &inter.TxErrors,
|
||||
}
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("Missing default interface %q for %s", network.DefaultInterfaceName, name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sb *summaryBuilder) containerInfoV2ToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []stats.UserDefinedMetric {
|
||||
type specVal struct {
|
||||
ref stats.UserDefinedMetricDescriptor
|
||||
valType cadvisorapiv1.DataType
|
||||
time time.Time
|
||||
value float64
|
||||
}
|
||||
udmMap := map[string]*specVal{}
|
||||
for _, spec := range info.Spec.CustomMetrics {
|
||||
udmMap[spec.Name] = &specVal{
|
||||
ref: stats.UserDefinedMetricDescriptor{
|
||||
Name: spec.Name,
|
||||
Type: stats.UserDefinedMetricType(spec.Type),
|
||||
Units: spec.Units,
|
||||
},
|
||||
valType: spec.Format,
|
||||
}
|
||||
}
|
||||
for _, stat := range info.Stats {
|
||||
for name, values := range stat.CustomMetrics {
|
||||
specVal, ok := udmMap[name]
|
||||
if !ok {
|
||||
glog.Warningf("spec for custom metric %q is missing from cAdvisor output. Spec: %+v, Metrics: %+v", name, info.Spec, stat.CustomMetrics)
|
||||
continue
|
||||
}
|
||||
for _, value := range values {
|
||||
// Pick the most recent value
|
||||
if value.Timestamp.Before(specVal.time) {
|
||||
continue
|
||||
}
|
||||
specVal.time = value.Timestamp
|
||||
specVal.value = value.FloatValue
|
||||
if specVal.valType == cadvisorapiv1.IntType {
|
||||
specVal.value = float64(value.IntValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
var udm []stats.UserDefinedMetric
|
||||
for _, specVal := range udmMap {
|
||||
udm = append(udm, stats.UserDefinedMetric{
|
||||
UserDefinedMetricDescriptor: specVal.ref,
|
||||
Time: metav1.NewTime(specVal.time),
|
||||
Value: specVal.value,
|
||||
})
|
||||
}
|
||||
return udm
|
||||
}
|
||||
|
|
|
@ -20,480 +20,123 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/info/v2"
|
||||
fuzz "github.com/google/gofuzz"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
k8sv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubestats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/leaky"
|
||||
statstest "k8s.io/kubernetes/pkg/kubelet/server/stats/testing"
|
||||
)
|
||||
|
||||
const (
|
||||
// Offsets from seed value in generated container stats.
|
||||
offsetCPUUsageCores = iota
|
||||
offsetCPUUsageCoreSeconds
|
||||
offsetMemPageFaults
|
||||
offsetMemMajorPageFaults
|
||||
offsetMemUsageBytes
|
||||
offsetMemRSSBytes
|
||||
offsetMemWorkingSetBytes
|
||||
offsetNetRxBytes
|
||||
offsetNetRxErrors
|
||||
offsetNetTxBytes
|
||||
offsetNetTxErrors
|
||||
)
|
||||
|
||||
var (
|
||||
timestamp = time.Now()
|
||||
creationTime = timestamp.Add(-5 * time.Minute)
|
||||
)
|
||||
|
||||
func TestRemoveTerminatedContainerInfo(t *testing.T) {
|
||||
const (
|
||||
seedPastPod0Infra = 1000
|
||||
seedPastPod0Container0 = 2000
|
||||
seedPod0Infra = 3000
|
||||
seedPod0Container0 = 4000
|
||||
)
|
||||
const (
|
||||
namespace = "test"
|
||||
pName0 = "pod0"
|
||||
cName00 = "c0"
|
||||
)
|
||||
infos := map[string]v2.ContainerInfo{
|
||||
// ContainerInfo with past creation time and no CPU/memory usage for
|
||||
// simulating uncleaned cgroups of already terminated containers, which
|
||||
// should not be shown in the results.
|
||||
"/pod0-i-terminated-1": summaryTerminatedContainerInfo(seedPastPod0Infra, pName0, namespace, leaky.PodInfraContainerName),
|
||||
"/pod0-c0-terminated-1": summaryTerminatedContainerInfo(seedPastPod0Container0, pName0, namespace, cName00),
|
||||
|
||||
// Same as above but uses the same creation time as the latest
|
||||
// containers. They are terminated containers, so they should not be in
|
||||
// the results.
|
||||
"/pod0-i-terminated-2": summaryTerminatedContainerInfo(seedPod0Infra, pName0, namespace, leaky.PodInfraContainerName),
|
||||
"/pod0-c0-terminated-2": summaryTerminatedContainerInfo(seedPod0Container0, pName0, namespace, cName00),
|
||||
|
||||
// The latest containers, which should be in the results.
|
||||
"/pod0-i": summaryTestContainerInfo(seedPod0Infra, pName0, namespace, leaky.PodInfraContainerName),
|
||||
"/pod0-c0": summaryTestContainerInfo(seedPod0Container0, pName0, namespace, cName00),
|
||||
|
||||
// Duplicated containers with non-zero CPU and memory usage. This case
|
||||
// shouldn't happen unless something goes wrong, but we want to test
|
||||
// that the metrics reporting logic works in this scenario.
|
||||
"/pod0-i-duplicated": summaryTestContainerInfo(seedPod0Infra, pName0, namespace, leaky.PodInfraContainerName),
|
||||
"/pod0-c0-duplicated": summaryTestContainerInfo(seedPod0Container0, pName0, namespace, cName00),
|
||||
}
|
||||
output := removeTerminatedContainerInfo(infos)
|
||||
assert.Len(t, output, 4)
|
||||
for _, c := range []string{"/pod0-i", "/pod0-c0", "/pod0-i-duplicated", "/pod0-c0-duplicated"} {
|
||||
if _, found := output[c]; !found {
|
||||
t.Errorf("%q is expected to be in the output\n", c)
|
||||
func TestSummaryProvider(t *testing.T) {
|
||||
var (
|
||||
podStats = []statsapi.PodStats{
|
||||
{
|
||||
PodRef: statsapi.PodReference{Name: "test-pod", Namespace: "test-namespace", UID: "UID_test-pod"},
|
||||
StartTime: metav1.NewTime(time.Now()),
|
||||
Containers: []statsapi.ContainerStats{*getContainerStats()},
|
||||
Network: getNetworkStats(),
|
||||
VolumeStats: []statsapi.VolumeStats{*getVolumeStats()},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildSummary(t *testing.T) {
|
||||
node := k8sv1.Node{}
|
||||
node.Name = "FooNode"
|
||||
nodeConfig := cm.NodeConfig{
|
||||
RuntimeCgroupsName: "/docker-daemon",
|
||||
SystemCgroupsName: "/system",
|
||||
KubeletCgroupsName: "/kubelet",
|
||||
}
|
||||
const (
|
||||
namespace0 = "test0"
|
||||
namespace2 = "test2"
|
||||
)
|
||||
const (
|
||||
seedRoot = 0
|
||||
seedRuntime = 100
|
||||
seedKubelet = 200
|
||||
seedMisc = 300
|
||||
seedPod0Infra = 1000
|
||||
seedPod0Container0 = 2000
|
||||
seedPod0Container1 = 2001
|
||||
seedPod1Infra = 3000
|
||||
seedPod1Container = 4000
|
||||
seedPod2Infra = 5000
|
||||
seedPod2Container = 6000
|
||||
)
|
||||
const (
|
||||
pName0 = "pod0"
|
||||
pName1 = "pod1"
|
||||
pName2 = "pod0" // ensure pName2 conflicts with pName0, but is in a different namespace
|
||||
)
|
||||
const (
|
||||
cName00 = "c0"
|
||||
cName01 = "c1"
|
||||
cName10 = "c0" // ensure cName10 conflicts with cName02, but is in a different pod
|
||||
cName20 = "c1" // ensure cName20 conflicts with cName01, but is in a different pod + namespace
|
||||
)
|
||||
const (
|
||||
rootfsCapacity = uint64(10000000)
|
||||
rootfsAvailable = uint64(5000000)
|
||||
rootfsInodesFree = uint64(1000)
|
||||
rootfsInodes = uint64(2000)
|
||||
imagefsCapacity = uint64(20000000)
|
||||
imagefsAvailable = uint64(8000000)
|
||||
imagefsInodesFree = uint64(2000)
|
||||
imagefsInodes = uint64(4000)
|
||||
)
|
||||
|
||||
prf0 := kubestats.PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0}
|
||||
prf1 := kubestats.PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1}
|
||||
prf2 := kubestats.PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2}
|
||||
infos := map[string]v2.ContainerInfo{
|
||||
"/": summaryTestContainerInfo(seedRoot, "", "", ""),
|
||||
"/docker-daemon": summaryTestContainerInfo(seedRuntime, "", "", ""),
|
||||
"/kubelet": summaryTestContainerInfo(seedKubelet, "", "", ""),
|
||||
"/system": summaryTestContainerInfo(seedMisc, "", "", ""),
|
||||
// Pod0 - Namespace0
|
||||
"/pod0-i": summaryTestContainerInfo(seedPod0Infra, pName0, namespace0, leaky.PodInfraContainerName),
|
||||
"/pod0-c0": summaryTestContainerInfo(seedPod0Container0, pName0, namespace0, cName00),
|
||||
"/pod0-c1": summaryTestContainerInfo(seedPod0Container1, pName0, namespace0, cName01),
|
||||
// Pod1 - Namespace0
|
||||
"/pod1-i": summaryTestContainerInfo(seedPod1Infra, pName1, namespace0, leaky.PodInfraContainerName),
|
||||
"/pod1-c0": summaryTestContainerInfo(seedPod1Container, pName1, namespace0, cName10),
|
||||
// Pod2 - Namespace2
|
||||
"/pod2-i": summaryTestContainerInfo(seedPod2Infra, pName2, namespace2, leaky.PodInfraContainerName),
|
||||
"/pod2-c0": summaryTestContainerInfo(seedPod2Container, pName2, namespace2, cName20),
|
||||
}
|
||||
|
||||
freeRootfsInodes := rootfsInodesFree
|
||||
totalRootfsInodes := rootfsInodes
|
||||
rootfs := v2.FsInfo{
|
||||
Capacity: rootfsCapacity,
|
||||
Available: rootfsAvailable,
|
||||
InodesFree: &freeRootfsInodes,
|
||||
Inodes: &totalRootfsInodes,
|
||||
}
|
||||
freeImagefsInodes := imagefsInodesFree
|
||||
totalImagefsInodes := imagefsInodes
|
||||
imagefs := v2.FsInfo{
|
||||
Capacity: imagefsCapacity,
|
||||
Available: imagefsAvailable,
|
||||
InodesFree: &freeImagefsInodes,
|
||||
Inodes: &totalImagefsInodes,
|
||||
}
|
||||
|
||||
// memory limit overrides for each container (used to test available bytes if a memory limit is known)
|
||||
memoryLimitOverrides := map[string]uint64{
|
||||
"/": uint64(1 << 30),
|
||||
"/pod2-c0": uint64(1 << 15),
|
||||
}
|
||||
for name, memoryLimitOverride := range memoryLimitOverrides {
|
||||
info, found := infos[name]
|
||||
if !found {
|
||||
t.Errorf("No container defined with name %v", name)
|
||||
imageFsStats = getFsStats()
|
||||
rootFsStats = getFsStats()
|
||||
node = &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "test-node"}}
|
||||
nodeConfig = cm.NodeConfig{
|
||||
RuntimeCgroupsName: "/runtime",
|
||||
SystemCgroupsName: "/system",
|
||||
KubeletCgroupsName: "/kubelet",
|
||||
}
|
||||
info.Spec.Memory.Limit = memoryLimitOverride
|
||||
infos[name] = info
|
||||
}
|
||||
|
||||
sb := &summaryBuilder{
|
||||
newFsResourceAnalyzer(&MockStatsProvider{}, time.Minute*5), &node, nodeConfig, rootfs, imagefs, container.ImageStats{}, infos}
|
||||
summary, err := sb.build()
|
||||
|
||||
assert.NoError(t, err)
|
||||
nodeStats := summary.Node
|
||||
assert.Equal(t, "FooNode", nodeStats.NodeName)
|
||||
assert.EqualValues(t, testTime(creationTime, seedRoot).Unix(), nodeStats.StartTime.Time.Unix())
|
||||
checkCPUStats(t, "Node", seedRoot, nodeStats.CPU)
|
||||
checkMemoryStats(t, "Node", seedRoot, infos["/"], nodeStats.Memory)
|
||||
checkNetworkStats(t, "Node", seedRoot, nodeStats.Network)
|
||||
|
||||
systemSeeds := map[string]int{
|
||||
kubestats.SystemContainerRuntime: seedRuntime,
|
||||
kubestats.SystemContainerKubelet: seedKubelet,
|
||||
kubestats.SystemContainerMisc: seedMisc,
|
||||
}
|
||||
systemContainerToNodeCgroup := map[string]string{
|
||||
kubestats.SystemContainerRuntime: nodeConfig.RuntimeCgroupsName,
|
||||
kubestats.SystemContainerKubelet: nodeConfig.KubeletCgroupsName,
|
||||
kubestats.SystemContainerMisc: nodeConfig.SystemCgroupsName,
|
||||
}
|
||||
for _, sys := range nodeStats.SystemContainers {
|
||||
name := sys.Name
|
||||
info := infos[systemContainerToNodeCgroup[name]]
|
||||
seed, found := systemSeeds[name]
|
||||
if !found {
|
||||
t.Errorf("Unknown SystemContainer: %q", name)
|
||||
cgroupStatsMap = map[string]struct {
|
||||
cs *statsapi.ContainerStats
|
||||
ns *statsapi.NetworkStats
|
||||
}{
|
||||
"/": {cs: getContainerStats(), ns: getNetworkStats()},
|
||||
"/runtime": {cs: getContainerStats(), ns: getNetworkStats()},
|
||||
"/system": {cs: getContainerStats(), ns: getNetworkStats()},
|
||||
"/kubelet": {cs: getContainerStats(), ns: getNetworkStats()},
|
||||
}
|
||||
assert.EqualValues(t, testTime(creationTime, seed).Unix(), sys.StartTime.Time.Unix(), name+".StartTime")
|
||||
checkCPUStats(t, name, seed, sys.CPU)
|
||||
checkMemoryStats(t, name, seed, info, sys.Memory)
|
||||
assert.Nil(t, sys.Logs, name+".Logs")
|
||||
assert.Nil(t, sys.Rootfs, name+".Rootfs")
|
||||
}
|
||||
)
|
||||
|
||||
assert.Equal(t, 3, len(summary.Pods))
|
||||
indexPods := make(map[kubestats.PodReference]kubestats.PodStats, len(summary.Pods))
|
||||
for _, pod := range summary.Pods {
|
||||
indexPods[pod.PodRef] = pod
|
||||
}
|
||||
assert := assert.New(t)
|
||||
|
||||
// Validate Pod0 Results
|
||||
ps, found := indexPods[prf0]
|
||||
assert.True(t, found)
|
||||
assert.Len(t, ps.Containers, 2)
|
||||
indexCon := make(map[string]kubestats.ContainerStats, len(ps.Containers))
|
||||
for _, con := range ps.Containers {
|
||||
indexCon[con.Name] = con
|
||||
}
|
||||
con := indexCon[cName00]
|
||||
assert.EqualValues(t, testTime(creationTime, seedPod0Container0).Unix(), con.StartTime.Time.Unix())
|
||||
checkCPUStats(t, "Pod0Container0", seedPod0Container0, con.CPU)
|
||||
checkMemoryStats(t, "Pod0Conainer0", seedPod0Container0, infos["/pod0-c0"], con.Memory)
|
||||
mockStatsProvider := new(statstest.StatsProvider)
|
||||
mockStatsProvider.
|
||||
On("GetNode").Return(node, nil).
|
||||
On("GetNodeConfig").Return(nodeConfig).
|
||||
On("ListPodStats").Return(podStats, nil).
|
||||
On("ImageFsStats").Return(imageFsStats, nil).
|
||||
On("RootFsStats").Return(rootFsStats, nil).
|
||||
On("GetCgroupStats", "/").Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil).
|
||||
On("GetCgroupStats", "/runtime").Return(cgroupStatsMap["/runtime"].cs, cgroupStatsMap["/runtime"].ns, nil).
|
||||
On("GetCgroupStats", "/system").Return(cgroupStatsMap["/system"].cs, cgroupStatsMap["/system"].ns, nil).
|
||||
On("GetCgroupStats", "/kubelet").Return(cgroupStatsMap["/kubelet"].cs, cgroupStatsMap["/kubelet"].ns, nil)
|
||||
|
||||
con = indexCon[cName01]
|
||||
assert.EqualValues(t, testTime(creationTime, seedPod0Container1).Unix(), con.StartTime.Time.Unix())
|
||||
checkCPUStats(t, "Pod0Container1", seedPod0Container1, con.CPU)
|
||||
checkMemoryStats(t, "Pod0Container1", seedPod0Container1, infos["/pod0-c1"], con.Memory)
|
||||
provider := NewSummaryProvider(mockStatsProvider)
|
||||
summary, err := provider.Get()
|
||||
assert.NoError(err)
|
||||
|
||||
assert.EqualValues(t, testTime(creationTime, seedPod0Infra).Unix(), ps.StartTime.Time.Unix())
|
||||
checkNetworkStats(t, "Pod0", seedPod0Infra, ps.Network)
|
||||
assert.Equal(summary.Node.NodeName, "test-node")
|
||||
assert.Equal(summary.Node.StartTime, cgroupStatsMap["/"].cs.StartTime)
|
||||
assert.Equal(summary.Node.CPU, cgroupStatsMap["/"].cs.CPU)
|
||||
assert.Equal(summary.Node.Memory, cgroupStatsMap["/"].cs.Memory)
|
||||
assert.Equal(summary.Node.Network, cgroupStatsMap["/"].ns)
|
||||
assert.Equal(summary.Node.Fs, rootFsStats)
|
||||
assert.Equal(summary.Node.Runtime, &statsapi.RuntimeStats{ImageFs: imageFsStats})
|
||||
|
||||
// Validate Pod1 Results
|
||||
ps, found = indexPods[prf1]
|
||||
assert.True(t, found)
|
||||
assert.Len(t, ps.Containers, 1)
|
||||
con = ps.Containers[0]
|
||||
assert.Equal(t, cName10, con.Name)
|
||||
checkCPUStats(t, "Pod1Container0", seedPod1Container, con.CPU)
|
||||
checkMemoryStats(t, "Pod1Container0", seedPod1Container, infos["/pod1-c0"], con.Memory)
|
||||
checkNetworkStats(t, "Pod1", seedPod1Infra, ps.Network)
|
||||
|
||||
// Validate Pod2 Results
|
||||
ps, found = indexPods[prf2]
|
||||
assert.True(t, found)
|
||||
assert.Len(t, ps.Containers, 1)
|
||||
con = ps.Containers[0]
|
||||
assert.Equal(t, cName20, con.Name)
|
||||
checkCPUStats(t, "Pod2Container0", seedPod2Container, con.CPU)
|
||||
checkMemoryStats(t, "Pod2Container0", seedPod2Container, infos["/pod2-c0"], con.Memory)
|
||||
checkNetworkStats(t, "Pod2", seedPod2Infra, ps.Network)
|
||||
assert.Equal(len(summary.Node.SystemContainers), 3)
|
||||
assert.Contains(summary.Node.SystemContainers,
|
||||
statsapi.ContainerStats{
|
||||
Name: "kubelet",
|
||||
StartTime: cgroupStatsMap["/kubelet"].cs.StartTime,
|
||||
CPU: cgroupStatsMap["/kubelet"].cs.CPU,
|
||||
Memory: cgroupStatsMap["/kubelet"].cs.Memory,
|
||||
UserDefinedMetrics: cgroupStatsMap["/kubelet"].cs.UserDefinedMetrics,
|
||||
},
|
||||
statsapi.ContainerStats{
|
||||
Name: "system",
|
||||
StartTime: cgroupStatsMap["/system"].cs.StartTime,
|
||||
CPU: cgroupStatsMap["/system"].cs.CPU,
|
||||
Memory: cgroupStatsMap["/system"].cs.Memory,
|
||||
UserDefinedMetrics: cgroupStatsMap["/system"].cs.UserDefinedMetrics,
|
||||
},
|
||||
statsapi.ContainerStats{
|
||||
Name: "runtime",
|
||||
StartTime: cgroupStatsMap["/runtime"].cs.StartTime,
|
||||
CPU: cgroupStatsMap["/runtime"].cs.CPU,
|
||||
Memory: cgroupStatsMap["/runtime"].cs.Memory,
|
||||
UserDefinedMetrics: cgroupStatsMap["/runtime"].cs.UserDefinedMetrics,
|
||||
},
|
||||
)
|
||||
assert.Equal(summary.Pods, podStats)
|
||||
}
|
||||
|
||||
func generateCustomMetricSpec() []v1.MetricSpec {
|
||||
f := fuzz.New().NilChance(0).Funcs(
|
||||
func(e *v1.MetricSpec, c fuzz.Continue) {
|
||||
c.Fuzz(&e.Name)
|
||||
switch c.Intn(3) {
|
||||
case 0:
|
||||
e.Type = v1.MetricGauge
|
||||
case 1:
|
||||
e.Type = v1.MetricCumulative
|
||||
case 2:
|
||||
e.Type = v1.MetricDelta
|
||||
}
|
||||
switch c.Intn(2) {
|
||||
case 0:
|
||||
e.Format = v1.IntType
|
||||
case 1:
|
||||
e.Format = v1.FloatType
|
||||
}
|
||||
c.Fuzz(&e.Units)
|
||||
})
|
||||
var ret []v1.MetricSpec
|
||||
f.Fuzz(&ret)
|
||||
return ret
|
||||
func getFsStats() *statsapi.FsStats {
|
||||
f := fuzz.New().NilChance(0)
|
||||
v := &statsapi.FsStats{}
|
||||
f.Fuzz(v)
|
||||
return v
|
||||
}
|
||||
|
||||
func generateCustomMetrics(spec []v1.MetricSpec) map[string][]v1.MetricVal {
|
||||
ret := map[string][]v1.MetricVal{}
|
||||
for _, metricSpec := range spec {
|
||||
f := fuzz.New().NilChance(0).Funcs(
|
||||
func(e *v1.MetricVal, c fuzz.Continue) {
|
||||
switch metricSpec.Format {
|
||||
case v1.IntType:
|
||||
c.Fuzz(&e.IntValue)
|
||||
case v1.FloatType:
|
||||
c.Fuzz(&e.FloatValue)
|
||||
}
|
||||
})
|
||||
|
||||
var metrics []v1.MetricVal
|
||||
f.Fuzz(&metrics)
|
||||
ret[metricSpec.Name] = metrics
|
||||
}
|
||||
return ret
|
||||
func getContainerStats() *statsapi.ContainerStats {
|
||||
f := fuzz.New().NilChance(0)
|
||||
v := &statsapi.ContainerStats{}
|
||||
f.Fuzz(v)
|
||||
return v
|
||||
}
|
||||
|
||||
func summaryTerminatedContainerInfo(seed int, podName string, podNamespace string, containerName string) v2.ContainerInfo {
|
||||
cinfo := summaryTestContainerInfo(seed, podName, podNamespace, containerName)
|
||||
cinfo.Stats[0].Memory.RSS = 0
|
||||
cinfo.Stats[0].CpuInst.Usage.Total = 0
|
||||
return cinfo
|
||||
func getVolumeStats() *statsapi.VolumeStats {
|
||||
f := fuzz.New().NilChance(0)
|
||||
v := &statsapi.VolumeStats{}
|
||||
f.Fuzz(v)
|
||||
return v
|
||||
}
|
||||
|
||||
// summaryTestContainerInfo builds a deterministic cadvisor v2.ContainerInfo
// fixture. Every stat value is seed plus one of the offset* constants, so
// the check* helpers can recompute the expected numbers from the same seed.
// When podName is non-empty the container carries the standard kubernetes
// pod labels and is therefore treated as pod-managed.
func summaryTestContainerInfo(seed int, podName string, podNamespace string, containerName string) v2.ContainerInfo {
	labels := map[string]string{}
	if podName != "" {
		labels = map[string]string{
			"io.kubernetes.pod.name":       podName,
			"io.kubernetes.pod.uid":        "UID" + podName,
			"io.kubernetes.pod.namespace":  podNamespace,
			"io.kubernetes.container.name": containerName,
		}
	}
	// by default, kernel will set memory.limit_in_bytes to 1 << 63 if not bounded
	unlimitedMemory := uint64(1 << 63)
	spec := v2.ContainerSpec{
		CreationTime: testTime(creationTime, seed),
		HasCpu:       true,
		HasMemory:    true,
		HasNetwork:   true,
		Labels:       labels,
		Memory: v2.MemorySpec{
			Limit: unlimitedMemory,
		},
		CustomMetrics: generateCustomMetricSpec(),
	}

	stats := v2.ContainerStats{
		Timestamp: testTime(timestamp, seed),
		Cpu:       &v1.CpuStats{},
		CpuInst:   &v2.CpuInstStats{},
		Memory: &v1.MemoryStats{
			Usage:      uint64(seed + offsetMemUsageBytes),
			WorkingSet: uint64(seed + offsetMemWorkingSetBytes),
			RSS:        uint64(seed + offsetMemRSSBytes),
			ContainerData: v1.MemoryStatsMemoryData{
				Pgfault:    uint64(seed + offsetMemPageFaults),
				Pgmajfault: uint64(seed + offsetMemMajorPageFaults),
			},
		},
		Network: &v2.NetworkStats{
			// eth0 carries the seed-derived values that the tests assert on;
			// cbr0 uses fixed values and is expected to be ignored.
			Interfaces: []v1.InterfaceStats{{
				Name:     "eth0",
				RxBytes:  uint64(seed + offsetNetRxBytes),
				RxErrors: uint64(seed + offsetNetRxErrors),
				TxBytes:  uint64(seed + offsetNetTxBytes),
				TxErrors: uint64(seed + offsetNetTxErrors),
			}, {
				Name:     "cbr0",
				RxBytes:  100,
				RxErrors: 100,
				TxBytes:  100,
				TxErrors: 100,
			}},
		},
		CustomMetrics: generateCustomMetrics(spec.CustomMetrics),
	}
	// Cumulative vs. instantaneous CPU usage use distinct offsets so the two
	// can be told apart in assertions.
	stats.Cpu.Usage.Total = uint64(seed + offsetCPUUsageCoreSeconds)
	stats.CpuInst.Usage.Total = uint64(seed + offsetCPUUsageCores)
	return v2.ContainerInfo{
		Spec:  spec,
		Stats: []*v2.ContainerStats{&stats},
	}
}
|
||||
|
||||
func testTime(base time.Time, seed int) time.Time {
|
||||
return base.Add(time.Duration(seed) * time.Second)
|
||||
}
|
||||
|
||||
// checkNetworkStats asserts that the summary network stats carry the values
// seeded into the cadvisor fixture (seed plus the offsetNet* constants) and
// the seed-derived timestamp. label prefixes every assertion message so
// failures identify which container/pod was being checked.
func checkNetworkStats(t *testing.T, label string, seed int, stats *kubestats.NetworkStats) {
	assert.NotNil(t, stats)
	assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".Net.Time")
	assert.EqualValues(t, seed+offsetNetRxBytes, *stats.RxBytes, label+".Net.RxBytes")
	assert.EqualValues(t, seed+offsetNetRxErrors, *stats.RxErrors, label+".Net.RxErrors")
	assert.EqualValues(t, seed+offsetNetTxBytes, *stats.TxBytes, label+".Net.TxBytes")
	assert.EqualValues(t, seed+offsetNetTxErrors, *stats.TxErrors, label+".Net.TxErrors")
}
|
||||
|
||||
// checkCPUStats asserts that the summary CPU stats match the fixture:
// instantaneous usage is seed+offsetCPUUsageCores and cumulative usage is
// seed+offsetCPUUsageCoreSeconds (see summaryTestContainerInfo).
func checkCPUStats(t *testing.T, label string, seed int, stats *kubestats.CPUStats) {
	assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".CPU.Time")
	assert.EqualValues(t, seed+offsetCPUUsageCores, *stats.UsageNanoCores, label+".CPU.UsageCores")
	assert.EqualValues(t, seed+offsetCPUUsageCoreSeconds, *stats.UsageCoreNanoSeconds, label+".CPU.UsageCoreSeconds")
}
|
||||
|
||||
// checkMemoryStats asserts that the summary memory stats match the fixture
// (seed plus the offsetMem* constants). AvailableBytes is only expected when
// the container spec has a bounded memory limit: then it must equal
// limit - workingSet; otherwise it must be nil.
func checkMemoryStats(t *testing.T, label string, seed int, info v2.ContainerInfo, stats *kubestats.MemoryStats) {
	assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".Mem.Time")
	assert.EqualValues(t, seed+offsetMemUsageBytes, *stats.UsageBytes, label+".Mem.UsageBytes")
	assert.EqualValues(t, seed+offsetMemWorkingSetBytes, *stats.WorkingSetBytes, label+".Mem.WorkingSetBytes")
	assert.EqualValues(t, seed+offsetMemRSSBytes, *stats.RSSBytes, label+".Mem.RSSBytes")
	assert.EqualValues(t, seed+offsetMemPageFaults, *stats.PageFaults, label+".Mem.PageFaults")
	assert.EqualValues(t, seed+offsetMemMajorPageFaults, *stats.MajorPageFaults, label+".Mem.MajorPageFaults")
	if !info.Spec.HasMemory || isMemoryUnlimited(info.Spec.Memory.Limit) {
		assert.Nil(t, stats.AvailableBytes, label+".Mem.AvailableBytes")
	} else {
		expected := info.Spec.Memory.Limit - *stats.WorkingSetBytes
		assert.EqualValues(t, expected, *stats.AvailableBytes, label+".Mem.AvailableBytes")
	}
}
|
||||
|
||||
func TestCustomMetrics(t *testing.T) {
|
||||
spec := []v1.MetricSpec{
|
||||
{
|
||||
Name: "qos",
|
||||
Type: v1.MetricGauge,
|
||||
Format: v1.IntType,
|
||||
Units: "per second",
|
||||
},
|
||||
{
|
||||
Name: "cpuLoad",
|
||||
Type: v1.MetricCumulative,
|
||||
Format: v1.FloatType,
|
||||
Units: "count",
|
||||
},
|
||||
}
|
||||
timestamp1 := time.Now()
|
||||
timestamp2 := time.Now().Add(time.Minute)
|
||||
metrics := map[string][]v1.MetricVal{
|
||||
"qos": {
|
||||
{
|
||||
Timestamp: timestamp1,
|
||||
IntValue: 10,
|
||||
},
|
||||
{
|
||||
Timestamp: timestamp2,
|
||||
IntValue: 100,
|
||||
},
|
||||
},
|
||||
"cpuLoad": {
|
||||
{
|
||||
Timestamp: timestamp1,
|
||||
FloatValue: 1.2,
|
||||
},
|
||||
{
|
||||
Timestamp: timestamp2,
|
||||
FloatValue: 2.1,
|
||||
},
|
||||
},
|
||||
}
|
||||
cInfo := v2.ContainerInfo{
|
||||
Spec: v2.ContainerSpec{
|
||||
CustomMetrics: spec,
|
||||
},
|
||||
Stats: []*v2.ContainerStats{
|
||||
{
|
||||
CustomMetrics: metrics,
|
||||
},
|
||||
},
|
||||
}
|
||||
sb := &summaryBuilder{}
|
||||
assert.Contains(t, sb.containerInfoV2ToUserDefinedMetrics(&cInfo),
|
||||
kubestats.UserDefinedMetric{
|
||||
UserDefinedMetricDescriptor: kubestats.UserDefinedMetricDescriptor{
|
||||
Name: "qos",
|
||||
Type: kubestats.MetricGauge,
|
||||
Units: "per second",
|
||||
},
|
||||
Time: metav1.NewTime(timestamp2),
|
||||
Value: 100,
|
||||
},
|
||||
kubestats.UserDefinedMetric{
|
||||
UserDefinedMetricDescriptor: kubestats.UserDefinedMetricDescriptor{
|
||||
Name: "cpuLoad",
|
||||
Type: kubestats.MetricCumulative,
|
||||
Units: "count",
|
||||
},
|
||||
Time: metav1.NewTime(timestamp2),
|
||||
Value: 2.1,
|
||||
})
|
||||
func getNetworkStats() *statsapi.NetworkStats {
|
||||
f := fuzz.New().NilChance(0)
|
||||
v := &statsapi.NetworkStats{}
|
||||
f.Fuzz(v)
|
||||
return v
|
||||
}
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
# Bazel build rules for the mockery-generated StatsProvider mock used by
# kubelet tests. Targets here are automanaged: regenerate with gazelle
# rather than hand-editing srcs/deps.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["mock_stats_provider.go"],
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/cm:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/stretchr/testify/mock:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
|
|
@ -0,0 +1,280 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testing
|
||||
|
||||
import cm "k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
import corev1 "k8s.io/api/core/v1"
|
||||
import mock "github.com/stretchr/testify/mock"
|
||||
|
||||
import types "k8s.io/apimachinery/pkg/types"
|
||||
import v1 "github.com/google/cadvisor/info/v1"
|
||||
import v1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
import volume "k8s.io/kubernetes/pkg/volume"
|
||||
|
||||
// DO NOT EDIT
|
||||
// GENERATED BY mockery
|
||||
|
||||
// StatsProvider is an autogenerated mock type for the StatsProvider type.
//
// Generated by mockery: each method records its arguments via the embedded
// mock.Mock and returns whatever the test configured with
// On(...).Return(...). Return values may be configured either as concrete
// values or as functions of the call's arguments (the `rf` branches below).
// Do not hand-edit the method bodies; regenerate with mockery instead.
type StatsProvider struct {
	mock.Mock
}

// GetCgroupStats provides a mock function with given fields: cgroupName
func (_m *StatsProvider) GetCgroupStats(cgroupName string) (*v1alpha1.ContainerStats, *v1alpha1.NetworkStats, error) {
	ret := _m.Called(cgroupName)

	// Each return slot: prefer a configured function of the arguments,
	// otherwise use the configured concrete value (nil-checked for pointers).
	var r0 *v1alpha1.ContainerStats
	if rf, ok := ret.Get(0).(func(string) *v1alpha1.ContainerStats); ok {
		r0 = rf(cgroupName)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*v1alpha1.ContainerStats)
		}
	}

	var r1 *v1alpha1.NetworkStats
	if rf, ok := ret.Get(1).(func(string) *v1alpha1.NetworkStats); ok {
		r1 = rf(cgroupName)
	} else {
		if ret.Get(1) != nil {
			r1 = ret.Get(1).(*v1alpha1.NetworkStats)
		}
	}

	var r2 error
	if rf, ok := ret.Get(2).(func(string) error); ok {
		r2 = rf(cgroupName)
	} else {
		r2 = ret.Error(2)
	}

	return r0, r1, r2
}

// GetContainerInfo provides a mock function with given fields: podFullName, uid, containerName, req
func (_m *StatsProvider) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *v1.ContainerInfoRequest) (*v1.ContainerInfo, error) {
	ret := _m.Called(podFullName, uid, containerName, req)

	var r0 *v1.ContainerInfo
	if rf, ok := ret.Get(0).(func(string, types.UID, string, *v1.ContainerInfoRequest) *v1.ContainerInfo); ok {
		r0 = rf(podFullName, uid, containerName, req)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*v1.ContainerInfo)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string, types.UID, string, *v1.ContainerInfoRequest) error); ok {
		r1 = rf(podFullName, uid, containerName, req)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetNode provides a mock function with given fields:
func (_m *StatsProvider) GetNode() (*corev1.Node, error) {
	ret := _m.Called()

	var r0 *corev1.Node
	if rf, ok := ret.Get(0).(func() *corev1.Node); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*corev1.Node)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetNodeConfig provides a mock function with given fields:
func (_m *StatsProvider) GetNodeConfig() cm.NodeConfig {
	ret := _m.Called()

	// NodeConfig is a value type, so a concrete return value must always be
	// configured; the type assertion panics otherwise.
	var r0 cm.NodeConfig
	if rf, ok := ret.Get(0).(func() cm.NodeConfig); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(cm.NodeConfig)
	}

	return r0
}

// GetPodByName provides a mock function with given fields: namespace, name
func (_m *StatsProvider) GetPodByName(namespace string, name string) (*corev1.Pod, bool) {
	ret := _m.Called(namespace, name)

	var r0 *corev1.Pod
	if rf, ok := ret.Get(0).(func(string, string) *corev1.Pod); ok {
		r0 = rf(namespace, name)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*corev1.Pod)
		}
	}

	var r1 bool
	if rf, ok := ret.Get(1).(func(string, string) bool); ok {
		r1 = rf(namespace, name)
	} else {
		r1 = ret.Get(1).(bool)
	}

	return r0, r1
}

// GetPods provides a mock function with given fields:
func (_m *StatsProvider) GetPods() []*corev1.Pod {
	ret := _m.Called()

	var r0 []*corev1.Pod
	if rf, ok := ret.Get(0).(func() []*corev1.Pod); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*corev1.Pod)
		}
	}

	return r0
}

// GetRawContainerInfo provides a mock function with given fields: containerName, req, subcontainers
func (_m *StatsProvider) GetRawContainerInfo(containerName string, req *v1.ContainerInfoRequest, subcontainers bool) (map[string]*v1.ContainerInfo, error) {
	ret := _m.Called(containerName, req, subcontainers)

	var r0 map[string]*v1.ContainerInfo
	if rf, ok := ret.Get(0).(func(string, *v1.ContainerInfoRequest, bool) map[string]*v1.ContainerInfo); ok {
		r0 = rf(containerName, req, subcontainers)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(map[string]*v1.ContainerInfo)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string, *v1.ContainerInfoRequest, bool) error); ok {
		r1 = rf(containerName, req, subcontainers)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ImageFsStats provides a mock function with given fields:
func (_m *StatsProvider) ImageFsStats() (*v1alpha1.FsStats, error) {
	ret := _m.Called()

	var r0 *v1alpha1.FsStats
	if rf, ok := ret.Get(0).(func() *v1alpha1.FsStats); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*v1alpha1.FsStats)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ListPodStats provides a mock function with given fields:
func (_m *StatsProvider) ListPodStats() ([]v1alpha1.PodStats, error) {
	ret := _m.Called()

	var r0 []v1alpha1.PodStats
	if rf, ok := ret.Get(0).(func() []v1alpha1.PodStats); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]v1alpha1.PodStats)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ListVolumesForPod provides a mock function with given fields: podUID
func (_m *StatsProvider) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
	ret := _m.Called(podUID)

	var r0 map[string]volume.Volume
	if rf, ok := ret.Get(0).(func(types.UID) map[string]volume.Volume); ok {
		r0 = rf(podUID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(map[string]volume.Volume)
		}
	}

	var r1 bool
	if rf, ok := ret.Get(1).(func(types.UID) bool); ok {
		r1 = rf(podUID)
	} else {
		r1 = ret.Get(1).(bool)
	}

	return r0, r1
}

// RootFsStats provides a mock function with given fields:
func (_m *StatsProvider) RootFsStats() (*v1alpha1.FsStats, error) {
	ret := _m.Called()

	var r0 *v1alpha1.FsStats
	if rf, ok := ret.Get(0).(func() *v1alpha1.FsStats); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*v1alpha1.FsStats)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
|
|
@ -0,0 +1,69 @@
|
|||
# Bazel build rules for pkg/kubelet/stats: the StatsProvider implementation
# that serves container stats from either cadvisor or CRI. Targets here are
# automanaged: regenerate with gazelle rather than hand-editing srcs/deps.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "cadvisor_stats_provider.go",
        "cri_stats_provider.go",
        "helper.go",
        "stats_provider.go",
    ],
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/kubelet/apis/cri:go_default_library",
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/cadvisor:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/leaky:go_default_library",
        "//pkg/kubelet/network:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/server/stats:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v2:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

go_test(
    name = "go_default_test",
    srcs = [
        "cadvisor_stats_provider_test.go",
        "helper_test.go",
        "stats_provider_test.go",
    ],
    library = ":go_default_library",
    deps = [
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/cadvisor/testing:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/container/testing:go_default_library",
        "//pkg/kubelet/leaky:go_default_library",
        "//pkg/kubelet/pod/testing:go_default_library",
        "//pkg/kubelet/server/stats:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v2:go_default_library",
        "//vendor/github.com/google/gofuzz:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/github.com/stretchr/testify/require:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ],
)
|
@ -0,0 +1,288 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/leaky"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
)
|
||||
|
||||
// cadvisorStatsProvider implements the containerStatsProvider interface by
// getting the container stats from cAdvisor. This is needed by docker and rkt
// integrations since they do not provide stats from CRI.
//
// All dependencies are injected via newCadvisorStatsProvider.
type cadvisorStatsProvider struct {
	// cadvisor is used to get the stats of the cgroup for the containers that
	// are managed by pods.
	cadvisor cadvisor.Interface
	// resourceAnalyzer is used to get the volume stats of the pods.
	resourceAnalyzer stats.ResourceAnalyzer
	// imageService is used to get the stats of the image filesystem.
	imageService kubecontainer.ImageService
}
|
||||
|
||||
// newCadvisorStatsProvider returns a containerStatsProvider that provides
|
||||
// container stats from cAdvisor.
|
||||
func newCadvisorStatsProvider(
|
||||
cadvisor cadvisor.Interface,
|
||||
resourceAnalyzer stats.ResourceAnalyzer,
|
||||
imageService kubecontainer.ImageService,
|
||||
) containerStatsProvider {
|
||||
return &cadvisorStatsProvider{
|
||||
cadvisor: cadvisor,
|
||||
resourceAnalyzer: resourceAnalyzer,
|
||||
imageService: imageService,
|
||||
}
|
||||
}
|
||||
|
||||
// ListPodStats returns the stats of all the pod-managed containers.
//
// It walks the full cgroup hierarchy from cAdvisor, drops terminated and
// non-pod containers, groups the remaining containers by their pod, and
// attaches volume stats from the resource analyzer to each pod entry.
func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) {
	// Gets node root filesystem information and image filesystem stats, which
	// will be used to populate the available and capacity bytes/inodes in
	// container stats.
	rootFsInfo, err := p.cadvisor.RootFsInfo()
	if err != nil {
		return nil, fmt.Errorf("failed to get rootFs info: %v", err)
	}
	imageFsInfo, err := p.cadvisor.ImagesFsInfo()
	if err != nil {
		return nil, fmt.Errorf("failed to get imageFs info: %v", err)
	}

	infos, err := p.cadvisor.ContainerInfoV2("/", cadvisorapiv2.RequestOptions{
		IdType:    cadvisorapiv2.TypeName,
		Count:     2, // 2 samples are needed to compute "instantaneous" CPU
		Recursive: true,
	})
	if err != nil {
		if _, ok := infos["/"]; ok {
			// If the failure is partial, log it and return a best-effort
			// response.
			glog.Errorf("Partial failure issuing cadvisor.ContainerInfoV2: %v", err)
		} else {
			return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
		}
	}

	infos = removeTerminatedContainerInfo(infos)

	// Map each container to a pod and update the PodStats with container data.
	podToStats := map[statsapi.PodReference]*statsapi.PodStats{}
	for key, cinfo := range infos {
		// On systemd using devicemapper each mount into the container has an
		// associated cgroup. We ignore them to ensure we do not get duplicate
		// entries in our summary. For details on .mount units:
		// http://man7.org/linux/man-pages/man5/systemd.mount.5.html
		if strings.HasSuffix(key, ".mount") {
			continue
		}
		// Build the Pod key if this container is managed by a Pod
		if !isPodManagedContainer(&cinfo) {
			continue
		}
		ref := buildPodRef(&cinfo)

		// Lookup the PodStats for the pod using the PodRef. If none exists,
		// initialize a new entry.
		podStats, found := podToStats[ref]
		if !found {
			podStats = &statsapi.PodStats{PodRef: ref}
			podToStats[ref] = podStats
		}

		// Update the PodStats entry with the stats from the container by
		// adding it to podStats.Containers.
		containerName := kubetypes.GetContainerName(cinfo.Spec.Labels)
		if containerName == leaky.PodInfraContainerName {
			// Special case for infrastructure container which is hidden from
			// the user and has network stats.
			podStats.Network = cadvisorInfoToNetworkStats("pod:"+ref.Namespace+"_"+ref.Name, &cinfo)
			podStats.StartTime = metav1.NewTime(cinfo.Spec.CreationTime)
		} else {
			podStats.Containers = append(podStats.Containers, *cadvisorInfoToContainerStats(containerName, &cinfo, &rootFsInfo, &imageFsInfo))
		}
	}

	// Add each PodStats to the result.
	result := make([]statsapi.PodStats, 0, len(podToStats))
	for _, podStats := range podToStats {
		// Lookup the volume stats for each pod.
		podUID := types.UID(podStats.PodRef.UID)
		if vstats, found := p.resourceAnalyzer.GetPodVolumeStats(podUID); found {
			podStats.VolumeStats = vstats.Volumes
		}
		result = append(result, *podStats)
	}

	return result, nil
}
|
||||
|
||||
// ImageFsStats returns the stats of the filesystem for storing images.
|
||||
func (p *cadvisorStatsProvider) ImageFsStats() (*statsapi.FsStats, error) {
|
||||
imageFsInfo, err := p.cadvisor.ImagesFsInfo()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get imageFs info: %v", err)
|
||||
}
|
||||
imageStats, err := p.imageService.ImageStats()
|
||||
if err != nil || imageStats == nil {
|
||||
return nil, fmt.Errorf("failed to get image stats: %v", err)
|
||||
}
|
||||
|
||||
var imageFsInodesUsed *uint64
|
||||
if imageFsInfo.Inodes != nil && imageFsInfo.InodesFree != nil {
|
||||
imageFsIU := *imageFsInfo.Inodes - *imageFsInfo.InodesFree
|
||||
imageFsInodesUsed = &imageFsIU
|
||||
}
|
||||
|
||||
// Get the root container stats's timestamp, which will be used as the
|
||||
// imageFs stats timestamp.
|
||||
rootStats, err := getCgroupStats(p.cadvisor, "/")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get root container stats: %v", err)
|
||||
}
|
||||
|
||||
return &statsapi.FsStats{
|
||||
Time: metav1.NewTime(rootStats.Timestamp),
|
||||
AvailableBytes: &imageFsInfo.Available,
|
||||
CapacityBytes: &imageFsInfo.Capacity,
|
||||
UsedBytes: &imageStats.TotalStorageBytes,
|
||||
InodesFree: imageFsInfo.InodesFree,
|
||||
Inodes: imageFsInfo.Inodes,
|
||||
InodesUsed: imageFsInodesUsed,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// buildPodRef returns a PodReference that identifies the Pod managing cinfo
|
||||
func buildPodRef(cinfo *cadvisorapiv2.ContainerInfo) statsapi.PodReference {
|
||||
podName := kubetypes.GetPodName(cinfo.Spec.Labels)
|
||||
podNamespace := kubetypes.GetPodNamespace(cinfo.Spec.Labels)
|
||||
podUID := kubetypes.GetPodUID(cinfo.Spec.Labels)
|
||||
return statsapi.PodReference{Name: podName, Namespace: podNamespace, UID: podUID}
|
||||
}
|
||||
|
||||
// isPodManagedContainer returns true if the cinfo container is managed by a Pod
|
||||
func isPodManagedContainer(cinfo *cadvisorapiv2.ContainerInfo) bool {
|
||||
podName := kubetypes.GetPodName(cinfo.Spec.Labels)
|
||||
podNamespace := kubetypes.GetPodNamespace(cinfo.Spec.Labels)
|
||||
managed := podName != "" && podNamespace != ""
|
||||
if !managed && podName != podNamespace {
|
||||
glog.Warningf(
|
||||
"Expect container to have either both podName (%s) and podNamespace (%s) labels, or neither.",
|
||||
podName, podNamespace)
|
||||
}
|
||||
return managed
|
||||
}
|
||||
|
||||
// removeTerminatedContainerInfo returns the specified containerInfo but with
// the stats of the terminated containers removed.
//
// A ContainerInfo is considered to be of a terminated container if it has an
// older CreationTime and zero CPU instantaneous and memory RSS usage.
//
// Entries not managed by a pod are dropped entirely; they are not relevant
// to pod-level stats.
func removeTerminatedContainerInfo(containerInfo map[string]cadvisorapiv2.ContainerInfo) map[string]cadvisorapiv2.ContainerInfo {
	// Group the cgroup entries by (pod, container name) so that multiple
	// cgroups belonging to the same logical container can be compared.
	cinfoMap := make(map[containerID][]containerInfoWithCgroup)
	for key, cinfo := range containerInfo {
		if !isPodManagedContainer(&cinfo) {
			continue
		}
		cinfoID := containerID{
			podRef:        buildPodRef(&cinfo),
			containerName: kubetypes.GetContainerName(cinfo.Spec.Labels),
		}
		cinfoMap[cinfoID] = append(cinfoMap[cinfoID], containerInfoWithCgroup{
			cinfo:  cinfo,
			cgroup: key,
		})
	}
	result := make(map[string]cadvisorapiv2.ContainerInfo)
	for _, refs := range cinfoMap {
		// A unique entry cannot be a stale duplicate; keep it as-is.
		if len(refs) == 1 {
			result[refs[0].cgroup] = refs[0].cinfo
			continue
		}
		// Oldest first; see ByCreationTime for the tie-break rule.
		sort.Sort(ByCreationTime(refs))
		i := 0
		for ; i < len(refs); i++ {
			if hasMemoryAndCPUInstUsage(&refs[i].cinfo) {
				// Stops removing when we first see an info with non-zero
				// CPU/Memory usage.
				break
			}
		}
		// Keep everything from the first live entry onward (this may keep
		// several entries when duplicates all report usage).
		for ; i < len(refs); i++ {
			result[refs[i].cgroup] = refs[i].cinfo
		}
	}
	return result
}
|
||||
|
||||
// ByCreationTime implements sort.Interface for []containerInfoWithCgroup based
// on the cinfo.Spec.CreationTime field. Sorting is ascending, i.e. oldest
// containers first.
type ByCreationTime []containerInfoWithCgroup

func (a ByCreationTime) Len() int      { return len(a) }
func (a ByCreationTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByCreationTime) Less(i, j int) bool {
	if a[i].cinfo.Spec.CreationTime.Equal(a[j].cinfo.Spec.CreationTime) {
		// There shouldn't be two containers with the same name and/or the same
		// creation time. However, to make the logic here robust, we break the
		// tie by moving the one without CPU instantaneous or memory RSS usage
		// to the beginning.
		return hasMemoryAndCPUInstUsage(&a[j].cinfo)
	}
	return a[i].cinfo.Spec.CreationTime.Before(a[j].cinfo.Spec.CreationTime)
}
|
||||
|
||||
// containerID is the identity of a container in a pod. It is used as the map
// key when grouping duplicate cgroup entries for the same logical container.
type containerID struct {
	// podRef identifies the owning pod (name, namespace, UID).
	podRef statsapi.PodReference
	// containerName is the container's name within that pod.
	containerName string
}
|
||||
|
||||
// containerInfoWithCgroup contains the ContainerInfo and its cgroup name, so
// that a filtered entry can be written back under its original cgroup key.
type containerInfoWithCgroup struct {
	// cinfo is the container's cadvisor info.
	cinfo cadvisorapiv2.ContainerInfo
	// cgroup is the cgroup name under which cinfo was reported.
	cgroup string
}
|
||||
|
||||
// hasMemoryAndCPUInstUsage returns true if the specified container info has
|
||||
// both non-zero CPU instantaneous usage and non-zero memory RSS usage, and
|
||||
// false otherwise.
|
||||
func hasMemoryAndCPUInstUsage(info *cadvisorapiv2.ContainerInfo) bool {
|
||||
if !info.Spec.HasCpu || !info.Spec.HasMemory {
|
||||
return false
|
||||
}
|
||||
cstat, found := latestContainerStats(info)
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
if cstat.CpuInst == nil {
|
||||
return false
|
||||
}
|
||||
return cstat.CpuInst.Usage.Total != 0 && cstat.Memory.RSS != 0
|
||||
}
|
|
@ -0,0 +1,271 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
"k8s.io/kubernetes/pkg/kubelet/leaky"
|
||||
)
|
||||
|
||||
// TestRemoveTerminatedContainerInfo verifies that removeTerminatedContainerInfo
// drops cgroup entries for already-terminated containers (those generated via
// getTerminatedContainerInfo) while keeping the latest live containers,
// including duplicated entries that still report usage.
func TestRemoveTerminatedContainerInfo(t *testing.T) {
	const (
		// Seed values that make each generated fake stats entry distinguishable.
		seedPastPod0Infra      = 1000
		seedPastPod0Container0 = 2000
		seedPod0Infra          = 3000
		seedPod0Container0     = 4000
	)
	const (
		namespace = "test"
		pName0    = "pod0"
		cName00   = "c0"
	)
	infos := map[string]cadvisorapiv2.ContainerInfo{
		// ContainerInfo with past creation time and no CPU/memory usage for
		// simulating uncleaned cgroups of already terminated containers, which
		// should not be shown in the results.
		"/pod0-i-terminated-1":  getTerminatedContainerInfo(seedPastPod0Infra, pName0, namespace, leaky.PodInfraContainerName),
		"/pod0-c0-terminated-1": getTerminatedContainerInfo(seedPastPod0Container0, pName0, namespace, cName00),

		// Same as above but uses the same creation time as the latest
		// containers. They are terminated containers, so they should not be in
		// the results.
		"/pod0-i-terminated-2":  getTerminatedContainerInfo(seedPod0Infra, pName0, namespace, leaky.PodInfraContainerName),
		"/pod0-c0-terminated-2": getTerminatedContainerInfo(seedPod0Container0, pName0, namespace, cName00),

		// The latest containers, which should be in the results.
		"/pod0-i":  getTestContainerInfo(seedPod0Infra, pName0, namespace, leaky.PodInfraContainerName),
		"/pod0-c0": getTestContainerInfo(seedPod0Container0, pName0, namespace, cName00),

		// Duplicated containers with non-zero CPU and memory usage. This case
		// shouldn't happen unless something goes wrong, but we want to test
		// that the metrics reporting logic works in this scenario.
		"/pod0-i-duplicated":  getTestContainerInfo(seedPod0Infra, pName0, namespace, leaky.PodInfraContainerName),
		"/pod0-c0-duplicated": getTestContainerInfo(seedPod0Container0, pName0, namespace, cName00),
	}
	output := removeTerminatedContainerInfo(infos)
	// Only the four live entries (latest + duplicated) should remain.
	assert.Len(t, output, 4)
	for _, c := range []string{"/pod0-i", "/pod0-c0", "/pod0-i-duplicated", "/pod0-c0-duplicated"} {
		if _, found := output[c]; !found {
			t.Errorf("%q is expected to be in the output\n", c)
		}
	}
}
|
||||
|
||||
func TestListPodStats(t *testing.T) {
|
||||
const (
|
||||
namespace0 = "test0"
|
||||
namespace2 = "test2"
|
||||
)
|
||||
const (
|
||||
seedRoot = 0
|
||||
seedRuntime = 100
|
||||
seedKubelet = 200
|
||||
seedMisc = 300
|
||||
seedPod0Infra = 1000
|
||||
seedPod0Container0 = 2000
|
||||
seedPod0Container1 = 2001
|
||||
seedPod1Infra = 3000
|
||||
seedPod1Container = 4000
|
||||
seedPod2Infra = 5000
|
||||
seedPod2Container = 6000
|
||||
)
|
||||
const (
|
||||
pName0 = "pod0"
|
||||
pName1 = "pod1"
|
||||
pName2 = "pod0" // ensure pName2 conflicts with pName0, but is in a different namespace
|
||||
)
|
||||
const (
|
||||
cName00 = "c0"
|
||||
cName01 = "c1"
|
||||
cName10 = "c0" // ensure cName10 conflicts with cName02, but is in a different pod
|
||||
cName20 = "c1" // ensure cName20 conflicts with cName01, but is in a different pod + namespace
|
||||
)
|
||||
const (
|
||||
rootfsCapacity = uint64(10000000)
|
||||
rootfsAvailable = uint64(5000000)
|
||||
rootfsInodesFree = uint64(1000)
|
||||
rootfsInodes = uint64(2000)
|
||||
imagefsCapacity = uint64(20000000)
|
||||
imagefsAvailable = uint64(8000000)
|
||||
imagefsInodesFree = uint64(2000)
|
||||
imagefsInodes = uint64(4000)
|
||||
)
|
||||
|
||||
prf0 := statsapi.PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0}
|
||||
prf1 := statsapi.PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1}
|
||||
prf2 := statsapi.PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2}
|
||||
infos := map[string]cadvisorapiv2.ContainerInfo{
|
||||
"/": getTestContainerInfo(seedRoot, "", "", ""),
|
||||
"/docker-daemon": getTestContainerInfo(seedRuntime, "", "", ""),
|
||||
"/kubelet": getTestContainerInfo(seedKubelet, "", "", ""),
|
||||
"/system": getTestContainerInfo(seedMisc, "", "", ""),
|
||||
// Pod0 - Namespace0
|
||||
"/pod0-i": getTestContainerInfo(seedPod0Infra, pName0, namespace0, leaky.PodInfraContainerName),
|
||||
"/pod0-c0": getTestContainerInfo(seedPod0Container0, pName0, namespace0, cName00),
|
||||
"/pod0-c1": getTestContainerInfo(seedPod0Container1, pName0, namespace0, cName01),
|
||||
// Pod1 - Namespace0
|
||||
"/pod1-i": getTestContainerInfo(seedPod1Infra, pName1, namespace0, leaky.PodInfraContainerName),
|
||||
"/pod1-c0": getTestContainerInfo(seedPod1Container, pName1, namespace0, cName10),
|
||||
// Pod2 - Namespace2
|
||||
"/pod2-i": getTestContainerInfo(seedPod2Infra, pName2, namespace2, leaky.PodInfraContainerName),
|
||||
"/pod2-c0": getTestContainerInfo(seedPod2Container, pName2, namespace2, cName20),
|
||||
}
|
||||
|
||||
freeRootfsInodes := rootfsInodesFree
|
||||
totalRootfsInodes := rootfsInodes
|
||||
rootfs := cadvisorapiv2.FsInfo{
|
||||
Capacity: rootfsCapacity,
|
||||
Available: rootfsAvailable,
|
||||
InodesFree: &freeRootfsInodes,
|
||||
Inodes: &totalRootfsInodes,
|
||||
}
|
||||
|
||||
freeImagefsInodes := imagefsInodesFree
|
||||
totalImagefsInodes := imagefsInodes
|
||||
imagefs := cadvisorapiv2.FsInfo{
|
||||
Capacity: imagefsCapacity,
|
||||
Available: imagefsAvailable,
|
||||
InodesFree: &freeImagefsInodes,
|
||||
Inodes: &totalImagefsInodes,
|
||||
}
|
||||
|
||||
// memory limit overrides for each container (used to test available bytes if a memory limit is known)
|
||||
memoryLimitOverrides := map[string]uint64{
|
||||
"/": uint64(1 << 30),
|
||||
"/pod2-c0": uint64(1 << 15),
|
||||
}
|
||||
for name, memoryLimitOverride := range memoryLimitOverrides {
|
||||
info, found := infos[name]
|
||||
if !found {
|
||||
t.Errorf("No container defined with name %v", name)
|
||||
}
|
||||
info.Spec.Memory.Limit = memoryLimitOverride
|
||||
infos[name] = info
|
||||
}
|
||||
|
||||
options := cadvisorapiv2.RequestOptions{
|
||||
IdType: cadvisorapiv2.TypeName,
|
||||
Count: 2,
|
||||
Recursive: true,
|
||||
}
|
||||
|
||||
mockCadvisor := new(cadvisortest.Mock)
|
||||
mockCadvisor.
|
||||
On("ContainerInfoV2", "/", options).Return(infos, nil).
|
||||
On("RootFsInfo").Return(rootfs, nil).
|
||||
On("ImagesFsInfo").Return(imagefs, nil)
|
||||
|
||||
mockRuntime := new(containertest.Mock)
|
||||
mockRuntime.
|
||||
On("ImageStats").Return(&kubecontainer.ImageStats{TotalStorageBytes: 123}, nil)
|
||||
|
||||
resourceAnalyzer := &fakeResourceAnalyzer{}
|
||||
|
||||
p := NewCadvisorStatsProvider(mockCadvisor, resourceAnalyzer, nil, nil, mockRuntime)
|
||||
pods, err := p.ListPodStats()
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 3, len(pods))
|
||||
indexPods := make(map[statsapi.PodReference]statsapi.PodStats, len(pods))
|
||||
for _, pod := range pods {
|
||||
indexPods[pod.PodRef] = pod
|
||||
}
|
||||
|
||||
// Validate Pod0 Results
|
||||
ps, found := indexPods[prf0]
|
||||
assert.True(t, found)
|
||||
assert.Len(t, ps.Containers, 2)
|
||||
indexCon := make(map[string]statsapi.ContainerStats, len(ps.Containers))
|
||||
for _, con := range ps.Containers {
|
||||
indexCon[con.Name] = con
|
||||
}
|
||||
con := indexCon[cName00]
|
||||
assert.EqualValues(t, testTime(creationTime, seedPod0Container0).Unix(), con.StartTime.Time.Unix())
|
||||
checkCPUStats(t, "Pod0Container0", seedPod0Container0, con.CPU)
|
||||
checkMemoryStats(t, "Pod0Conainer0", seedPod0Container0, infos["/pod0-c0"], con.Memory)
|
||||
|
||||
con = indexCon[cName01]
|
||||
assert.EqualValues(t, testTime(creationTime, seedPod0Container1).Unix(), con.StartTime.Time.Unix())
|
||||
checkCPUStats(t, "Pod0Container1", seedPod0Container1, con.CPU)
|
||||
checkMemoryStats(t, "Pod0Container1", seedPod0Container1, infos["/pod0-c1"], con.Memory)
|
||||
|
||||
assert.EqualValues(t, testTime(creationTime, seedPod0Infra).Unix(), ps.StartTime.Time.Unix())
|
||||
checkNetworkStats(t, "Pod0", seedPod0Infra, ps.Network)
|
||||
|
||||
// Validate Pod1 Results
|
||||
ps, found = indexPods[prf1]
|
||||
assert.True(t, found)
|
||||
assert.Len(t, ps.Containers, 1)
|
||||
con = ps.Containers[0]
|
||||
assert.Equal(t, cName10, con.Name)
|
||||
checkCPUStats(t, "Pod1Container0", seedPod1Container, con.CPU)
|
||||
checkMemoryStats(t, "Pod1Container0", seedPod1Container, infos["/pod1-c0"], con.Memory)
|
||||
checkNetworkStats(t, "Pod1", seedPod1Infra, ps.Network)
|
||||
|
||||
// Validate Pod2 Results
|
||||
ps, found = indexPods[prf2]
|
||||
assert.True(t, found)
|
||||
assert.Len(t, ps.Containers, 1)
|
||||
con = ps.Containers[0]
|
||||
assert.Equal(t, cName20, con.Name)
|
||||
checkCPUStats(t, "Pod2Container0", seedPod2Container, con.CPU)
|
||||
checkMemoryStats(t, "Pod2Container0", seedPod2Container, infos["/pod2-c0"], con.Memory)
|
||||
checkNetworkStats(t, "Pod2", seedPod2Infra, ps.Network)
|
||||
}
|
||||
|
||||
// TestImagesFsStats verifies that ImageFsStats combines cadvisor's image
// filesystem info (capacity/available/inodes) with the runtime's reported
// total image storage usage, stamped with the root container's latest stats
// timestamp.
func TestImagesFsStats(t *testing.T) {
	var (
		assert       = assert.New(t)
		mockCadvisor = new(cadvisortest.Mock)
		mockRuntime  = new(containertest.Mock)

		seed          = 100
		options       = cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false}
		imageFsInfo   = getTestFsInfo(100)
		containerInfo = map[string]cadvisorapiv2.ContainerInfo{"/": getTestContainerInfo(seed, "test-pod", "test-ns", "test-container")}
		imageStats    = &kubecontainer.ImageStats{TotalStorageBytes: 100}
	)

	mockCadvisor.
		On("ImagesFsInfo").Return(imageFsInfo, nil).
		On("ContainerInfoV2", "/", options).Return(containerInfo, nil)
	mockRuntime.
		On("ImageStats").Return(imageStats, nil)

	provider := newCadvisorStatsProvider(mockCadvisor, &fakeResourceAnalyzer{}, mockRuntime)
	stats, err := provider.ImageFsStats()
	assert.NoError(err)

	// Timestamp must come from the root container's stats sample, used bytes
	// from the runtime's image stats, and everything else from cadvisor's
	// image filesystem info.
	assert.Equal(stats.Time, metav1.NewTime(containerInfo["/"].Stats[0].Timestamp))
	assert.Equal(*stats.AvailableBytes, imageFsInfo.Available)
	assert.Equal(*stats.CapacityBytes, imageFsInfo.Capacity)
	assert.Equal(*stats.UsedBytes, imageStats.TotalStorageBytes)
	assert.Equal(stats.InodesFree, imageFsInfo.InodesFree)
	assert.Equal(stats.Inodes, imageFsInfo.Inodes)
	assert.Equal(*stats.InodesUsed, *imageFsInfo.Inodes-*imageFsInfo.InodesFree)

	mockCadvisor.AssertExpectations(t)
}
|
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
)
|
||||
|
||||
// criStatsProvider implements the containerStatsProvider interface by getting
// the container stats from CRI.
type criStatsProvider struct {
	// cadvisor is used to get the node root filesystem's stats (such as the
	// capacity/available bytes/inodes) that will be populated in per container
	// filesystem stats.
	cadvisor cadvisor.Interface
	// resourceAnalyzer is used to get the volume stats of the pods.
	resourceAnalyzer stats.ResourceAnalyzer
	// runtimeService is used to get the status and stats of the pods and
	// their managed containers.
	runtimeService internalapi.RuntimeService
	// imageService is used to get the stats of the image filesystem.
	imageService internalapi.ImageManagerService
}
|
||||
|
||||
// newCRIStatsProvider returns a containerStatsProvider implementation that
|
||||
// provides container stats using CRI.
|
||||
func newCRIStatsProvider(
|
||||
cadvisor cadvisor.Interface,
|
||||
resourceAnalyzer stats.ResourceAnalyzer,
|
||||
runtimeService internalapi.RuntimeService,
|
||||
imageService internalapi.ImageManagerService,
|
||||
) containerStatsProvider {
|
||||
return &criStatsProvider{
|
||||
cadvisor: cadvisor,
|
||||
resourceAnalyzer: resourceAnalyzer,
|
||||
runtimeService: runtimeService,
|
||||
imageService: imageService,
|
||||
}
|
||||
}
|
||||
|
||||
// ListPodStats returns the stats of all the pod-managed containers.
// TODO: the CRI-based implementation is a stub; it currently returns an
// error unconditionally.
func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) {
	return nil, fmt.Errorf("not implemented")
}
|
||||
|
||||
// ImageFsStats returns the stats of the image filesystem.
// TODO: the CRI-based implementation is a stub; it currently returns an
// error unconditionally.
func (p *criStatsProvider) ImageFsStats() (*statsapi.FsStats, error) {
	return nil, fmt.Errorf("not implemented")
}
|
|
@ -0,0 +1,248 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
)
|
||||
|
||||
// cadvisorInfoToContainerStats returns the statsapi.ContainerStats converted
|
||||
// from the container and filesystem info.
|
||||
func cadvisorInfoToContainerStats(name string, info *cadvisorapiv2.ContainerInfo, rootFs, imageFs *cadvisorapiv2.FsInfo) *statsapi.ContainerStats {
|
||||
result := &statsapi.ContainerStats{
|
||||
StartTime: metav1.NewTime(info.Spec.CreationTime),
|
||||
Name: name,
|
||||
}
|
||||
|
||||
cstat, found := latestContainerStats(info)
|
||||
if !found {
|
||||
return result
|
||||
}
|
||||
|
||||
if info.Spec.HasCpu {
|
||||
cpuStats := statsapi.CPUStats{
|
||||
Time: metav1.NewTime(cstat.Timestamp),
|
||||
}
|
||||
if cstat.CpuInst != nil {
|
||||
cpuStats.UsageNanoCores = &cstat.CpuInst.Usage.Total
|
||||
}
|
||||
if cstat.Cpu != nil {
|
||||
cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total
|
||||
}
|
||||
result.CPU = &cpuStats
|
||||
}
|
||||
|
||||
if info.Spec.HasMemory {
|
||||
pageFaults := cstat.Memory.ContainerData.Pgfault
|
||||
majorPageFaults := cstat.Memory.ContainerData.Pgmajfault
|
||||
result.Memory = &statsapi.MemoryStats{
|
||||
Time: metav1.NewTime(cstat.Timestamp),
|
||||
UsageBytes: &cstat.Memory.Usage,
|
||||
WorkingSetBytes: &cstat.Memory.WorkingSet,
|
||||
RSSBytes: &cstat.Memory.RSS,
|
||||
PageFaults: &pageFaults,
|
||||
MajorPageFaults: &majorPageFaults,
|
||||
}
|
||||
// availableBytes = memory limit (if known) - workingset
|
||||
if !isMemoryUnlimited(info.Spec.Memory.Limit) {
|
||||
availableBytes := info.Spec.Memory.Limit - cstat.Memory.WorkingSet
|
||||
result.Memory.AvailableBytes = &availableBytes
|
||||
}
|
||||
}
|
||||
|
||||
// The container logs live on the node rootfs device
|
||||
result.Logs = &statsapi.FsStats{
|
||||
Time: metav1.NewTime(cstat.Timestamp),
|
||||
AvailableBytes: &rootFs.Available,
|
||||
CapacityBytes: &rootFs.Capacity,
|
||||
InodesFree: rootFs.InodesFree,
|
||||
Inodes: rootFs.Inodes,
|
||||
}
|
||||
|
||||
if rootFs.Inodes != nil && rootFs.InodesFree != nil {
|
||||
logsInodesUsed := *rootFs.Inodes - *rootFs.InodesFree
|
||||
result.Logs.InodesUsed = &logsInodesUsed
|
||||
}
|
||||
|
||||
// The container rootFs lives on the imageFs devices (which may not be the node root fs)
|
||||
result.Rootfs = &statsapi.FsStats{
|
||||
Time: metav1.NewTime(cstat.Timestamp),
|
||||
AvailableBytes: &imageFs.Available,
|
||||
CapacityBytes: &imageFs.Capacity,
|
||||
InodesFree: imageFs.InodesFree,
|
||||
Inodes: imageFs.Inodes,
|
||||
}
|
||||
|
||||
cfs := cstat.Filesystem
|
||||
if cfs != nil {
|
||||
if cfs.BaseUsageBytes != nil {
|
||||
rootfsUsage := *cfs.BaseUsageBytes
|
||||
result.Rootfs.UsedBytes = &rootfsUsage
|
||||
if cfs.TotalUsageBytes != nil {
|
||||
logsUsage := *cfs.TotalUsageBytes - *cfs.BaseUsageBytes
|
||||
result.Logs.UsedBytes = &logsUsage
|
||||
}
|
||||
}
|
||||
if cfs.InodeUsage != nil {
|
||||
rootInodes := *cfs.InodeUsage
|
||||
result.Rootfs.InodesUsed = &rootInodes
|
||||
}
|
||||
}
|
||||
|
||||
result.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(info)
|
||||
return result
|
||||
}
|
||||
|
||||
// cadvisorInfoToNetworkStats returns the statsapi.NetworkStats converted from
|
||||
// the container info from cadvisor.
|
||||
func cadvisorInfoToNetworkStats(name string, info *cadvisorapiv2.ContainerInfo) *statsapi.NetworkStats {
|
||||
if !info.Spec.HasNetwork {
|
||||
return nil
|
||||
}
|
||||
cstat, found := latestContainerStats(info)
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
for _, inter := range cstat.Network.Interfaces {
|
||||
if inter.Name == network.DefaultInterfaceName {
|
||||
return &statsapi.NetworkStats{
|
||||
Time: metav1.NewTime(cstat.Timestamp),
|
||||
RxBytes: &inter.RxBytes,
|
||||
RxErrors: &inter.RxErrors,
|
||||
TxBytes: &inter.TxBytes,
|
||||
TxErrors: &inter.TxErrors,
|
||||
}
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("Missing default interface %q for %s", network.DefaultInterfaceName, name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// cadvisorInfoToUserDefinedMetrics returns the statsapi.UserDefinedMetric
// converted from the container info from cadvisor. For each metric declared
// in the container spec it keeps the single most recent sample across all
// stats entries; samples without a matching spec are logged and skipped.
func cadvisorInfoToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []statsapi.UserDefinedMetric {
	// specVal pairs a metric descriptor with the latest value observed for it.
	type specVal struct {
		ref     statsapi.UserDefinedMetricDescriptor
		valType cadvisorapiv1.DataType
		time    time.Time
		value   float64
	}
	udmMap := map[string]*specVal{}
	for _, spec := range info.Spec.CustomMetrics {
		udmMap[spec.Name] = &specVal{
			ref: statsapi.UserDefinedMetricDescriptor{
				Name:  spec.Name,
				Type:  statsapi.UserDefinedMetricType(spec.Type),
				Units: spec.Units,
			},
			valType: spec.Format,
		}
	}
	for _, stat := range info.Stats {
		for name, values := range stat.CustomMetrics {
			specVal, ok := udmMap[name]
			if !ok {
				glog.Warningf("spec for custom metric %q is missing from cAdvisor output. Spec: %+v, Metrics: %+v", name, info.Spec, stat.CustomMetrics)
				continue
			}
			for _, value := range values {
				// Pick the most recent value
				if value.Timestamp.Before(specVal.time) {
					continue
				}
				specVal.time = value.Timestamp
				// The declared format decides which of the sample's two value
				// fields is authoritative.
				specVal.value = value.FloatValue
				if specVal.valType == cadvisorapiv1.IntType {
					specVal.value = float64(value.IntValue)
				}
			}
		}
	}
	var udm []statsapi.UserDefinedMetric
	for _, specVal := range udmMap {
		udm = append(udm, statsapi.UserDefinedMetric{
			UserDefinedMetricDescriptor: specVal.ref,
			Time:  metav1.NewTime(specVal.time),
			Value: specVal.value,
		})
	}
	return udm
}
|
||||
|
||||
// latestContainerStats returns the latest container stats from cadvisor, or nil if none exist
|
||||
func latestContainerStats(info *cadvisorapiv2.ContainerInfo) (*cadvisorapiv2.ContainerStats, bool) {
|
||||
stats := info.Stats
|
||||
if len(stats) < 1 {
|
||||
return nil, false
|
||||
}
|
||||
latest := stats[len(stats)-1]
|
||||
if latest == nil {
|
||||
return nil, false
|
||||
}
|
||||
return latest, true
|
||||
}
|
||||
|
||||
// isMemoryUnlimited reports whether a cgroup memory limit value should be
// treated as "no limit at all".
func isMemoryUnlimited(v uint64) bool {
	// Values above this threshold are considered "unlimited". This is not
	// MaxInt64 due to rounding by the kernel.
	// TODO: cadvisor should export this https://github.com/google/cadvisor/blob/master/metrics/prometheus.go#L596
	const unlimitedThreshold uint64 = 1 << 62
	return v > unlimitedThreshold
}
|
||||
|
||||
// getCgroupInfo returns the information of the container with the specified
|
||||
// containerName from cadvisor.
|
||||
func getCgroupInfo(cadvisor cadvisor.Interface, containerName string) (*cadvisorapiv2.ContainerInfo, error) {
|
||||
infoMap, err := cadvisor.ContainerInfoV2(containerName, cadvisorapiv2.RequestOptions{
|
||||
IdType: cadvisorapiv2.TypeName,
|
||||
Count: 2, // 2 samples are needed to compute "instantaneous" CPU
|
||||
Recursive: false,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get container info for %q: %v", containerName, err)
|
||||
}
|
||||
if len(infoMap) != 1 {
|
||||
return nil, fmt.Errorf("unexpected number of containers: %v", len(infoMap))
|
||||
}
|
||||
info := infoMap[containerName]
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
// getCgroupStats returns the latest stats of the container having the
|
||||
// specified containerName from cadvisor.
|
||||
func getCgroupStats(cadvisor cadvisor.Interface, containerName string) (*cadvisorapiv2.ContainerStats, error) {
|
||||
info, err := getCgroupInfo(cadvisor, containerName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stats, found := latestContainerStats(info)
|
||||
if !found {
|
||||
return nil, fmt.Errorf("failed to get latest stats from container info for %q", containerName)
|
||||
}
|
||||
return stats, nil
|
||||
}
|
|
@ -0,0 +1,99 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
)
|
||||
|
||||
// TestCustomMetrics verifies that cadvisorInfoToUserDefinedMetrics converts
// cadvisor custom-metric specs and samples into user-defined metrics, keeping
// only the most recent sample per metric and honoring the declared value
// format (int vs float).
func TestCustomMetrics(t *testing.T) {
	spec := []cadvisorapiv1.MetricSpec{
		{
			Name:   "qos",
			Type:   cadvisorapiv1.MetricGauge,
			Format: cadvisorapiv1.IntType,
			Units:  "per second",
		},
		{
			Name:   "cpuLoad",
			Type:   cadvisorapiv1.MetricCumulative,
			Format: cadvisorapiv1.FloatType,
			Units:  "count",
		},
	}
	// Two samples per metric; only the later (timestamp2) value should survive.
	timestamp1 := time.Now()
	timestamp2 := time.Now().Add(time.Minute)
	metrics := map[string][]cadvisorapiv1.MetricVal{
		"qos": {
			{
				Timestamp: timestamp1,
				IntValue:  10,
			},
			{
				Timestamp: timestamp2,
				IntValue:  100,
			},
		},
		"cpuLoad": {
			{
				Timestamp:  timestamp1,
				FloatValue: 1.2,
			},
			{
				Timestamp:  timestamp2,
				FloatValue: 2.1,
			},
		},
	}
	cInfo := cadvisorapiv2.ContainerInfo{
		Spec: cadvisorapiv2.ContainerSpec{
			CustomMetrics: spec,
		},
		Stats: []*cadvisorapiv2.ContainerStats{
			{
				CustomMetrics: metrics,
			},
		},
	}
	assert.Contains(t, cadvisorInfoToUserDefinedMetrics(&cInfo),
		statsapi.UserDefinedMetric{
			UserDefinedMetricDescriptor: statsapi.UserDefinedMetricDescriptor{
				Name:  "qos",
				Type:  statsapi.MetricGauge,
				Units: "per second",
			},
			Time:  metav1.NewTime(timestamp2),
			Value: 100,
		},
		statsapi.UserDefinedMetric{
			UserDefinedMetricDescriptor: statsapi.UserDefinedMetricDescriptor{
				Name:  "cpuLoad",
				Type:  statsapi.MetricCumulative,
				Units: "count",
			},
			Time:  metav1.NewTime(timestamp2),
			Value: 2.1,
		})
}
|
|
@ -0,0 +1,175 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
)
|
||||
|
||||
// NewCRIStatsProvider returns a StatsProvider that provides the node stats
|
||||
// from cAdvisor and the container stats from CRI.
|
||||
func NewCRIStatsProvider(
|
||||
cadvisor cadvisor.Interface,
|
||||
resourceAnalyzer stats.ResourceAnalyzer,
|
||||
podManager kubepod.Manager,
|
||||
runtimeCache kubecontainer.RuntimeCache,
|
||||
runtimeService internalapi.RuntimeService,
|
||||
imageService internalapi.ImageManagerService,
|
||||
) *StatsProvider {
|
||||
return newStatsProvider(cadvisor, podManager, runtimeCache, newCRIStatsProvider(cadvisor, resourceAnalyzer, runtimeService, imageService))
|
||||
}
|
||||
|
||||
// NewCadvisorStatsProvider returns a StatsProvider that provides both the
// node and the container stats from cAdvisor.
// (The previous comment said "containerStatsProvider", but this constructor
// returns the full *StatsProvider, mirroring NewCRIStatsProvider.)
func NewCadvisorStatsProvider(
	cadvisor cadvisor.Interface,
	resourceAnalyzer stats.ResourceAnalyzer,
	podManager kubepod.Manager,
	runtimeCache kubecontainer.RuntimeCache,
	imageService kubecontainer.ImageService,
) *StatsProvider {
	return newStatsProvider(cadvisor, podManager, runtimeCache, newCadvisorStatsProvider(cadvisor, resourceAnalyzer, imageService))
}
|
||||
|
||||
// newStatsProvider returns a new StatsProvider that provides node stats from
|
||||
// cAdvisor and the container stats using the containerStatsProvider.
|
||||
func newStatsProvider(
|
||||
cadvisor cadvisor.Interface,
|
||||
podManager kubepod.Manager,
|
||||
runtimeCache kubecontainer.RuntimeCache,
|
||||
containerStatsProvider containerStatsProvider,
|
||||
) *StatsProvider {
|
||||
return &StatsProvider{
|
||||
cadvisor: cadvisor,
|
||||
podManager: podManager,
|
||||
runtimeCache: runtimeCache,
|
||||
containerStatsProvider: containerStatsProvider,
|
||||
}
|
||||
}
|
||||
|
||||
// StatsProvider provides the stats of the node and the pod-managed containers.
type StatsProvider struct {
	// cadvisor serves node-level stats: cgroup info, root/image filesystem
	// info, and raw container queries.
	cadvisor cadvisor.Interface
	// podManager is used to translate static/mirror pod UIDs (see
	// GetContainerInfo).
	podManager kubepod.Manager
	// runtimeCache provides the runtime's cached view of the pods.
	runtimeCache kubecontainer.RuntimeCache
	// The embedded containerStatsProvider supplies ListPodStats and
	// ImageFsStats, backed by either cAdvisor or CRI depending on which
	// constructor built this StatsProvider.
	containerStatsProvider
}
|
||||
|
||||
// containerStatsProvider is an interface that provides the stats of the
// containers managed by pods.
type containerStatsProvider interface {
	// ListPodStats returns the stats of all the pod-managed containers,
	// grouped by pod.
	ListPodStats() ([]statsapi.PodStats, error)
	// ImageFsStats returns the stats of the filesystem holding images.
	ImageFsStats() (*statsapi.FsStats, error)
}
|
||||
|
||||
// GetCgroupStats returns the stats of the cgroup with the cgroupName.
|
||||
func (p *StatsProvider) GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) {
|
||||
info, err := getCgroupInfo(p.cadvisor, cgroupName)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get cgroup stats for %q: %v", cgroupName, err)
|
||||
}
|
||||
rootFsInfo, err := p.cadvisor.RootFsInfo()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get rootFs info: %v", err)
|
||||
}
|
||||
imageFsInfo, err := p.cadvisor.ImagesFsInfo()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get imageFs info: %v", err)
|
||||
}
|
||||
s := cadvisorInfoToContainerStats(cgroupName, info, &rootFsInfo, &imageFsInfo)
|
||||
n := cadvisorInfoToNetworkStats(cgroupName, info)
|
||||
return s, n, nil
|
||||
}
|
||||
|
||||
// RootFsStats returns the stats of the node root filesystem.
func (p *StatsProvider) RootFsStats() (*statsapi.FsStats, error) {
	rootFsInfo, err := p.cadvisor.RootFsInfo()
	if err != nil {
		return nil, fmt.Errorf("failed to get rootFs info: %v", err)
	}

	// Inode usage can only be derived when cadvisor reports both the total
	// and free inode counts; otherwise it is left nil.
	var nodeFsInodesUsed *uint64
	if rootFsInfo.Inodes != nil && rootFsInfo.InodesFree != nil {
		nodeFsIU := *rootFsInfo.Inodes - *rootFsInfo.InodesFree
		nodeFsInodesUsed = &nodeFsIU
	}

	// Get the root container's latest stats; its timestamp is used as the
	// timestamp of the returned rootFs stats. (The previous comment said
	// "imageFs stats timestamp", which was a copy-paste error.)
	rootStats, err := getCgroupStats(p.cadvisor, "/")
	if err != nil {
		return nil, fmt.Errorf("failed to get root container stats: %v", err)
	}

	return &statsapi.FsStats{
		Time:           metav1.NewTime(rootStats.Timestamp),
		AvailableBytes: &rootFsInfo.Available,
		CapacityBytes:  &rootFsInfo.Capacity,
		UsedBytes:      &rootFsInfo.Usage,
		InodesFree:     rootFsInfo.InodesFree,
		Inodes:         rootFsInfo.Inodes,
		InodesUsed:     nodeFsInodesUsed,
	}, nil
}
|
||||
|
||||
// GetContainerInfo returns stats (from cAdvisor) for a container.
func (p *StatsProvider) GetContainerInfo(podFullName string, podUID types.UID, containerName string, req *cadvisorapiv1.ContainerInfoRequest) (*cadvisorapiv1.ContainerInfo, error) {
	// Resolve and type convert back again.
	// We need the static pod UID but the kubecontainer API works with types.UID.
	podUID = types.UID(p.podManager.TranslatePodUID(podUID))

	// Locate the container through the runtime's cached pod list.
	pods, err := p.runtimeCache.GetPods()
	if err != nil {
		return nil, err
	}
	pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
	container := pod.FindContainerByName(containerName)
	if container == nil {
		return nil, kubecontainer.ErrContainerNotFound
	}

	// Query cadvisor by the runtime container ID. NOTE(review): this is a
	// Docker-specific cadvisor lookup (DockerContainer) — confirm behavior
	// under non-Docker runtimes.
	ci, err := p.cadvisor.DockerContainer(container.ID.ID, req)
	if err != nil {
		return nil, err
	}
	return &ci, nil
}
|
||||
|
||||
// GetRawContainerInfo returns the stats (from cadvisor) for a non-Kubernetes
|
||||
// container.
|
||||
func (p *StatsProvider) GetRawContainerInfo(containerName string, req *cadvisorapiv1.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapiv1.ContainerInfo, error) {
|
||||
if subcontainers {
|
||||
return p.cadvisor.SubcontainerInfo(containerName, req)
|
||||
}
|
||||
containerInfo, err := p.cadvisor.ContainerInfo(containerName, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]*cadvisorapiv1.ContainerInfo{
|
||||
containerInfo.Name: containerInfo,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,579 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
fuzz "github.com/google/gofuzz"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
kubepodtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
|
||||
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
)
|
||||
|
||||
const (
	// Offsets from seed value in generated container stats.
	// getTestContainerInfo and getTestFsInfo produce every metric as
	// seed+offsetXxx, so the check* helpers can recompute the expected
	// value from the seed alone.
	offsetCPUUsageCores = iota
	offsetCPUUsageCoreSeconds
	offsetMemPageFaults
	offsetMemMajorPageFaults
	offsetMemUsageBytes
	offsetMemRSSBytes
	offsetMemWorkingSetBytes
	offsetNetRxBytes
	offsetNetRxErrors
	offsetNetTxBytes
	offsetNetTxErrors
	offsetFsCapacity
	offsetFsAvailable
	offsetFsUsage
	offsetFsInodes
	offsetFsInodesFree
	offsetFsTotalUsageBytes
	offsetFsBaseUsageBytes
	offsetFsInodeUsage
)
|
||||
|
||||
var (
	// Fixed reference instants captured once at package init so that all
	// generated stats within a test run are mutually consistent.
	timestamp    = time.Now()
	creationTime = timestamp.Add(-5 * time.Minute)
)
|
||||
|
||||
func TestGetCgroupStats(t *testing.T) {
|
||||
const (
|
||||
cgroupName = "test-cgroup-name"
|
||||
rootFsInfoSeed = 1000
|
||||
imageFsInfoSeed = 2000
|
||||
containerInfoSeed = 3000
|
||||
)
|
||||
var (
|
||||
mockCadvisor = new(cadvisortest.Mock)
|
||||
mockPodManager = new(kubepodtest.MockManager)
|
||||
mockRuntimeCache = new(kubecontainertest.MockRuntimeCache)
|
||||
|
||||
assert = assert.New(t)
|
||||
options = cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false}
|
||||
|
||||
rootFsInfo = getTestFsInfo(rootFsInfoSeed)
|
||||
imageFsInfo = getTestFsInfo(imageFsInfoSeed)
|
||||
containerInfo = getTestContainerInfo(containerInfoSeed, "test-pod", "test-ns", "test-container")
|
||||
containerInfoMap = map[string]cadvisorapiv2.ContainerInfo{cgroupName: containerInfo}
|
||||
)
|
||||
|
||||
mockCadvisor.
|
||||
On("RootFsInfo").Return(rootFsInfo, nil).
|
||||
On("ImagesFsInfo").Return(imageFsInfo, nil).
|
||||
On("ContainerInfoV2", cgroupName, options).Return(containerInfoMap, nil)
|
||||
|
||||
provider := newStatsProvider(mockCadvisor, mockPodManager, mockRuntimeCache, fakeContainerStatsProvider{})
|
||||
cs, ns, err := provider.GetCgroupStats(cgroupName)
|
||||
assert.NoError(err)
|
||||
|
||||
checkCPUStats(t, "", containerInfoSeed, cs.CPU)
|
||||
checkMemoryStats(t, "", containerInfoSeed, containerInfo, cs.Memory)
|
||||
checkFsStats(t, "", imageFsInfoSeed, cs.Rootfs)
|
||||
checkFsStats(t, "", rootFsInfoSeed, cs.Logs)
|
||||
checkNetworkStats(t, "", containerInfoSeed, ns)
|
||||
|
||||
assert.Equal(cs.Name, cgroupName)
|
||||
assert.Equal(cs.StartTime, metav1.NewTime(containerInfo.Spec.CreationTime))
|
||||
|
||||
assert.Equal(cs.Rootfs.Time, metav1.NewTime(containerInfo.Stats[0].Timestamp))
|
||||
assert.Equal(*cs.Rootfs.UsedBytes, *containerInfo.Stats[0].Filesystem.BaseUsageBytes)
|
||||
assert.Equal(*cs.Rootfs.InodesUsed, *containerInfo.Stats[0].Filesystem.InodeUsage)
|
||||
|
||||
assert.Equal(cs.Logs.Time, metav1.NewTime(containerInfo.Stats[0].Timestamp))
|
||||
assert.Equal(*cs.Logs.UsedBytes, *containerInfo.Stats[0].Filesystem.TotalUsageBytes-*containerInfo.Stats[0].Filesystem.BaseUsageBytes)
|
||||
assert.Equal(*cs.Logs.InodesUsed, *rootFsInfo.Inodes-*rootFsInfo.InodesFree)
|
||||
|
||||
mockCadvisor.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestRootFsStats(t *testing.T) {
|
||||
const (
|
||||
rootFsInfoSeed = 1000
|
||||
containerInfoSeed = 2000
|
||||
)
|
||||
var (
|
||||
mockCadvisor = new(cadvisortest.Mock)
|
||||
mockPodManager = new(kubepodtest.MockManager)
|
||||
mockRuntimeCache = new(kubecontainertest.MockRuntimeCache)
|
||||
|
||||
assert = assert.New(t)
|
||||
options = cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false}
|
||||
|
||||
rootFsInfo = getTestFsInfo(rootFsInfoSeed)
|
||||
containerInfo = getTestContainerInfo(containerInfoSeed, "test-pod", "test-ns", "test-container")
|
||||
containerInfoMap = map[string]cadvisorapiv2.ContainerInfo{"/": containerInfo}
|
||||
)
|
||||
|
||||
mockCadvisor.
|
||||
On("RootFsInfo").Return(rootFsInfo, nil).
|
||||
On("ContainerInfoV2", "/", options).Return(containerInfoMap, nil)
|
||||
|
||||
provider := newStatsProvider(mockCadvisor, mockPodManager, mockRuntimeCache, fakeContainerStatsProvider{})
|
||||
stats, err := provider.RootFsStats()
|
||||
assert.NoError(err)
|
||||
|
||||
checkFsStats(t, "", rootFsInfoSeed, stats)
|
||||
|
||||
assert.Equal(stats.Time, metav1.NewTime(containerInfo.Stats[0].Timestamp))
|
||||
assert.Equal(*stats.UsedBytes, rootFsInfo.Usage)
|
||||
assert.Equal(*stats.InodesUsed, *rootFsInfo.Inodes-*rootFsInfo.InodesFree)
|
||||
|
||||
mockCadvisor.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestGetContainerInfo(t *testing.T) {
|
||||
cadvisorAPIFailure := fmt.Errorf("cAdvisor failure")
|
||||
runtimeError := fmt.Errorf("List containers error")
|
||||
tests := []struct {
|
||||
name string
|
||||
containerID string
|
||||
containerPath string
|
||||
cadvisorContainerInfo cadvisorapiv1.ContainerInfo
|
||||
runtimeError error
|
||||
podList []*kubecontainer.Pod
|
||||
requestedPodFullName string
|
||||
requestedPodUID types.UID
|
||||
requestedContainerName string
|
||||
expectDockerContainerCall bool
|
||||
mockError error
|
||||
expectedError error
|
||||
expectStats bool
|
||||
}{
|
||||
{
|
||||
name: "get container info",
|
||||
containerID: "ab2cdf",
|
||||
containerPath: "/docker/ab2cdf",
|
||||
cadvisorContainerInfo: cadvisorapiv1.ContainerInfo{
|
||||
ContainerReference: cadvisorapiv1.ContainerReference{
|
||||
Name: "/docker/ab2cdf",
|
||||
},
|
||||
},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainer.Pod{
|
||||
{
|
||||
ID: "12345678",
|
||||
Name: "qux",
|
||||
Namespace: "ns",
|
||||
Containers: []*kubecontainer.Container{
|
||||
{
|
||||
Name: "foo",
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: "ab2cdf"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
requestedPodFullName: "qux_ns",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
expectDockerContainerCall: true,
|
||||
mockError: nil,
|
||||
expectedError: nil,
|
||||
expectStats: true,
|
||||
},
|
||||
{
|
||||
name: "get container info when cadvisor failed",
|
||||
containerID: "ab2cdf",
|
||||
containerPath: "/docker/ab2cdf",
|
||||
cadvisorContainerInfo: cadvisorapiv1.ContainerInfo{},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainer.Pod{
|
||||
{
|
||||
ID: "uuid",
|
||||
Name: "qux",
|
||||
Namespace: "ns",
|
||||
Containers: []*kubecontainer.Container{
|
||||
{
|
||||
Name: "foo",
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: "ab2cdf"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
requestedPodFullName: "qux_ns",
|
||||
requestedPodUID: "uuid",
|
||||
requestedContainerName: "foo",
|
||||
expectDockerContainerCall: true,
|
||||
mockError: cadvisorAPIFailure,
|
||||
expectedError: cadvisorAPIFailure,
|
||||
expectStats: false,
|
||||
},
|
||||
{
|
||||
name: "get container info on non-existent container",
|
||||
containerID: "",
|
||||
containerPath: "",
|
||||
cadvisorContainerInfo: cadvisorapiv1.ContainerInfo{},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainer.Pod{},
|
||||
requestedPodFullName: "qux",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
expectDockerContainerCall: false,
|
||||
mockError: nil,
|
||||
expectedError: kubecontainer.ErrContainerNotFound,
|
||||
expectStats: false,
|
||||
},
|
||||
{
|
||||
name: "get container info when container runtime failed",
|
||||
containerID: "",
|
||||
containerPath: "",
|
||||
cadvisorContainerInfo: cadvisorapiv1.ContainerInfo{},
|
||||
runtimeError: runtimeError,
|
||||
podList: []*kubecontainer.Pod{},
|
||||
requestedPodFullName: "qux",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
mockError: nil,
|
||||
expectedError: runtimeError,
|
||||
expectStats: false,
|
||||
},
|
||||
{
|
||||
name: "get container info with no containers",
|
||||
containerID: "",
|
||||
containerPath: "",
|
||||
cadvisorContainerInfo: cadvisorapiv1.ContainerInfo{},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainer.Pod{},
|
||||
requestedPodFullName: "qux_ns",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
mockError: nil,
|
||||
expectedError: kubecontainer.ErrContainerNotFound,
|
||||
expectStats: false,
|
||||
},
|
||||
{
|
||||
name: "get container info with no matching containers",
|
||||
containerID: "",
|
||||
containerPath: "",
|
||||
cadvisorContainerInfo: cadvisorapiv1.ContainerInfo{},
|
||||
runtimeError: nil,
|
||||
podList: []*kubecontainer.Pod{
|
||||
{
|
||||
ID: "12345678",
|
||||
Name: "qux",
|
||||
Namespace: "ns",
|
||||
Containers: []*kubecontainer.Container{
|
||||
{
|
||||
Name: "bar",
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: "fakeID"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
requestedPodFullName: "qux_ns",
|
||||
requestedPodUID: "",
|
||||
requestedContainerName: "foo",
|
||||
mockError: nil,
|
||||
expectedError: kubecontainer.ErrContainerNotFound,
|
||||
expectStats: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
var (
|
||||
mockCadvisor = new(cadvisortest.Mock)
|
||||
mockPodManager = new(kubepodtest.MockManager)
|
||||
mockRuntimeCache = new(kubecontainertest.MockRuntimeCache)
|
||||
|
||||
cadvisorReq = &cadvisorapiv1.ContainerInfoRequest{}
|
||||
)
|
||||
|
||||
mockPodManager.On("TranslatePodUID", tc.requestedPodUID).Return(kubetypes.ResolvedPodUID(tc.requestedPodUID))
|
||||
mockRuntimeCache.On("GetPods").Return(tc.podList, tc.runtimeError)
|
||||
if tc.expectDockerContainerCall {
|
||||
mockCadvisor.On("DockerContainer", tc.containerID, cadvisorReq).Return(tc.cadvisorContainerInfo, tc.mockError)
|
||||
}
|
||||
|
||||
provider := newStatsProvider(mockCadvisor, mockPodManager, mockRuntimeCache, fakeContainerStatsProvider{})
|
||||
stats, err := provider.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUID, tc.requestedContainerName, cadvisorReq)
|
||||
assert.Equal(t, tc.expectedError, err)
|
||||
|
||||
if tc.expectStats {
|
||||
require.NotNil(t, stats)
|
||||
}
|
||||
mockCadvisor.AssertExpectations(t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetRawContainerInfoRoot(t *testing.T) {
|
||||
var (
|
||||
mockCadvisor = new(cadvisortest.Mock)
|
||||
mockPodManager = new(kubepodtest.MockManager)
|
||||
mockRuntimeCache = new(kubecontainertest.MockRuntimeCache)
|
||||
|
||||
cadvisorReq = &cadvisorapiv1.ContainerInfoRequest{}
|
||||
containerPath = "/"
|
||||
containerInfo = &cadvisorapiv1.ContainerInfo{
|
||||
ContainerReference: cadvisorapiv1.ContainerReference{
|
||||
Name: containerPath,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
mockCadvisor.On("ContainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
|
||||
|
||||
provider := newStatsProvider(mockCadvisor, mockPodManager, mockRuntimeCache, fakeContainerStatsProvider{})
|
||||
_, err := provider.GetRawContainerInfo(containerPath, cadvisorReq, false)
|
||||
assert.NoError(t, err)
|
||||
mockCadvisor.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestGetRawContainerInfoSubcontainers(t *testing.T) {
|
||||
var (
|
||||
mockCadvisor = new(cadvisortest.Mock)
|
||||
mockPodManager = new(kubepodtest.MockManager)
|
||||
mockRuntimeCache = new(kubecontainertest.MockRuntimeCache)
|
||||
|
||||
cadvisorReq = &cadvisorapiv1.ContainerInfoRequest{}
|
||||
containerPath = "/kubelet"
|
||||
containerInfo = map[string]*cadvisorapiv1.ContainerInfo{
|
||||
containerPath: {
|
||||
ContainerReference: cadvisorapiv1.ContainerReference{
|
||||
Name: containerPath,
|
||||
},
|
||||
},
|
||||
"/kubelet/sub": {
|
||||
ContainerReference: cadvisorapiv1.ContainerReference{
|
||||
Name: "/kubelet/sub",
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
mockCadvisor.On("SubcontainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
|
||||
|
||||
provider := newStatsProvider(mockCadvisor, mockPodManager, mockRuntimeCache, fakeContainerStatsProvider{})
|
||||
result, err := provider.GetRawContainerInfo(containerPath, cadvisorReq, true)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, result, 2)
|
||||
mockCadvisor.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func getTerminatedContainerInfo(seed int, podName string, podNamespace string, containerName string) cadvisorapiv2.ContainerInfo {
|
||||
cinfo := getTestContainerInfo(seed, podName, podNamespace, containerName)
|
||||
cinfo.Stats[0].Memory.RSS = 0
|
||||
cinfo.Stats[0].CpuInst.Usage.Total = 0
|
||||
return cinfo
|
||||
}
|
||||
|
||||
// getTestContainerInfo builds a deterministic cadvisorapiv2.ContainerInfo for
// tests. Every numeric metric is seed+offsetXxx (see the offset constants),
// so the check* helpers can recompute expected values from the seed. A
// non-empty podName attaches the standard Kubernetes pod/container labels;
// an empty podName yields an unlabeled (non-pod) container.
func getTestContainerInfo(seed int, podName string, podNamespace string, containerName string) cadvisorapiv2.ContainerInfo {
	labels := map[string]string{}
	if podName != "" {
		labels = map[string]string{
			"io.kubernetes.pod.name":       podName,
			"io.kubernetes.pod.uid":        "UID" + podName,
			"io.kubernetes.pod.namespace":  podNamespace,
			"io.kubernetes.container.name": containerName,
		}
	}
	// by default, kernel will set memory.limit_in_bytes to 1 << 63 if not bounded
	unlimitedMemory := uint64(1 << 63)
	spec := cadvisorapiv2.ContainerSpec{
		CreationTime:  testTime(creationTime, seed),
		HasCpu:        true,
		HasMemory:     true,
		HasNetwork:    true,
		Labels:        labels,
		Memory: cadvisorapiv2.MemorySpec{
			Limit: unlimitedMemory,
		},
		CustomMetrics: generateCustomMetricSpec(),
	}

	// Filesystem values are pointers in the cadvisor API, so take addressable
	// copies of the seeded values.
	totalUsageBytes := uint64(seed + offsetFsTotalUsageBytes)
	baseUsageBytes := uint64(seed + offsetFsBaseUsageBytes)
	inodeUsage := uint64(seed + offsetFsInodeUsage)

	stats := cadvisorapiv2.ContainerStats{
		Timestamp: testTime(timestamp, seed),
		Cpu:       &cadvisorapiv1.CpuStats{},
		CpuInst:   &cadvisorapiv2.CpuInstStats{},
		Memory: &cadvisorapiv1.MemoryStats{
			Usage:      uint64(seed + offsetMemUsageBytes),
			WorkingSet: uint64(seed + offsetMemWorkingSetBytes),
			RSS:        uint64(seed + offsetMemRSSBytes),
			ContainerData: cadvisorapiv1.MemoryStatsMemoryData{
				Pgfault:    uint64(seed + offsetMemPageFaults),
				Pgmajfault: uint64(seed + offsetMemMajorPageFaults),
			},
		},
		// Two interfaces: "eth0" carries the seeded values; "cbr0" carries
		// fixed values (exercises multi-interface aggregation in callers).
		Network: &cadvisorapiv2.NetworkStats{
			Interfaces: []cadvisorapiv1.InterfaceStats{{
				Name:     "eth0",
				RxBytes:  uint64(seed + offsetNetRxBytes),
				RxErrors: uint64(seed + offsetNetRxErrors),
				TxBytes:  uint64(seed + offsetNetTxBytes),
				TxErrors: uint64(seed + offsetNetTxErrors),
			}, {
				Name:     "cbr0",
				RxBytes:  100,
				RxErrors: 100,
				TxBytes:  100,
				TxErrors: 100,
			}},
		},
		CustomMetrics: generateCustomMetrics(spec.CustomMetrics),
		Filesystem: &cadvisorapiv2.FilesystemStats{
			TotalUsageBytes: &totalUsageBytes,
			BaseUsageBytes:  &baseUsageBytes,
			InodeUsage:      &inodeUsage,
		},
	}
	// Cumulative CPU seconds vs. instantaneous cores use distinct offsets.
	stats.Cpu.Usage.Total = uint64(seed + offsetCPUUsageCoreSeconds)
	stats.CpuInst.Usage.Total = uint64(seed + offsetCPUUsageCores)
	return cadvisorapiv2.ContainerInfo{
		Spec:  spec,
		Stats: []*cadvisorapiv2.ContainerStats{&stats},
	}
}
|
||||
|
||||
func getTestFsInfo(seed int) cadvisorapiv2.FsInfo {
|
||||
var (
|
||||
inodes = uint64(seed + offsetFsInodes)
|
||||
inodesFree = uint64(seed + offsetFsInodesFree)
|
||||
)
|
||||
return cadvisorapiv2.FsInfo{
|
||||
Device: "test-device",
|
||||
Mountpoint: "test-mount-point",
|
||||
Capacity: uint64(seed + offsetFsCapacity),
|
||||
Available: uint64(seed + offsetFsAvailable),
|
||||
Usage: uint64(seed + offsetFsUsage),
|
||||
Inodes: &inodes,
|
||||
InodesFree: &inodesFree,
|
||||
}
|
||||
}
|
||||
|
||||
func generateCustomMetricSpec() []cadvisorapiv1.MetricSpec {
|
||||
f := fuzz.New().NilChance(0).Funcs(
|
||||
func(e *cadvisorapiv1.MetricSpec, c fuzz.Continue) {
|
||||
c.Fuzz(&e.Name)
|
||||
switch c.Intn(3) {
|
||||
case 0:
|
||||
e.Type = cadvisorapiv1.MetricGauge
|
||||
case 1:
|
||||
e.Type = cadvisorapiv1.MetricCumulative
|
||||
case 2:
|
||||
e.Type = cadvisorapiv1.MetricDelta
|
||||
}
|
||||
switch c.Intn(2) {
|
||||
case 0:
|
||||
e.Format = cadvisorapiv1.IntType
|
||||
case 1:
|
||||
e.Format = cadvisorapiv1.FloatType
|
||||
}
|
||||
c.Fuzz(&e.Units)
|
||||
})
|
||||
var ret []cadvisorapiv1.MetricSpec
|
||||
f.Fuzz(&ret)
|
||||
return ret
|
||||
}
|
||||
|
||||
func generateCustomMetrics(spec []cadvisorapiv1.MetricSpec) map[string][]cadvisorapiv1.MetricVal {
|
||||
ret := map[string][]cadvisorapiv1.MetricVal{}
|
||||
for _, metricSpec := range spec {
|
||||
f := fuzz.New().NilChance(0).Funcs(
|
||||
func(e *cadvisorapiv1.MetricVal, c fuzz.Continue) {
|
||||
switch metricSpec.Format {
|
||||
case cadvisorapiv1.IntType:
|
||||
c.Fuzz(&e.IntValue)
|
||||
case cadvisorapiv1.FloatType:
|
||||
c.Fuzz(&e.FloatValue)
|
||||
}
|
||||
})
|
||||
|
||||
var metrics []cadvisorapiv1.MetricVal
|
||||
f.Fuzz(&metrics)
|
||||
ret[metricSpec.Name] = metrics
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func testTime(base time.Time, seed int) time.Time {
|
||||
return base.Add(time.Duration(seed) * time.Second)
|
||||
}
|
||||
|
||||
func checkNetworkStats(t *testing.T, label string, seed int, stats *statsapi.NetworkStats) {
|
||||
assert.NotNil(t, stats)
|
||||
assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".Net.Time")
|
||||
assert.EqualValues(t, seed+offsetNetRxBytes, *stats.RxBytes, label+".Net.RxBytes")
|
||||
assert.EqualValues(t, seed+offsetNetRxErrors, *stats.RxErrors, label+".Net.RxErrors")
|
||||
assert.EqualValues(t, seed+offsetNetTxBytes, *stats.TxBytes, label+".Net.TxBytes")
|
||||
assert.EqualValues(t, seed+offsetNetTxErrors, *stats.TxErrors, label+".Net.TxErrors")
|
||||
}
|
||||
|
||||
func checkCPUStats(t *testing.T, label string, seed int, stats *statsapi.CPUStats) {
|
||||
assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".CPU.Time")
|
||||
assert.EqualValues(t, seed+offsetCPUUsageCores, *stats.UsageNanoCores, label+".CPU.UsageCores")
|
||||
assert.EqualValues(t, seed+offsetCPUUsageCoreSeconds, *stats.UsageCoreNanoSeconds, label+".CPU.UsageCoreSeconds")
|
||||
}
|
||||
|
||||
func checkMemoryStats(t *testing.T, label string, seed int, info cadvisorapiv2.ContainerInfo, stats *statsapi.MemoryStats) {
|
||||
assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".Mem.Time")
|
||||
assert.EqualValues(t, seed+offsetMemUsageBytes, *stats.UsageBytes, label+".Mem.UsageBytes")
|
||||
assert.EqualValues(t, seed+offsetMemWorkingSetBytes, *stats.WorkingSetBytes, label+".Mem.WorkingSetBytes")
|
||||
assert.EqualValues(t, seed+offsetMemRSSBytes, *stats.RSSBytes, label+".Mem.RSSBytes")
|
||||
assert.EqualValues(t, seed+offsetMemPageFaults, *stats.PageFaults, label+".Mem.PageFaults")
|
||||
assert.EqualValues(t, seed+offsetMemMajorPageFaults, *stats.MajorPageFaults, label+".Mem.MajorPageFaults")
|
||||
if !info.Spec.HasMemory || isMemoryUnlimited(info.Spec.Memory.Limit) {
|
||||
assert.Nil(t, stats.AvailableBytes, label+".Mem.AvailableBytes")
|
||||
} else {
|
||||
expected := info.Spec.Memory.Limit - *stats.WorkingSetBytes
|
||||
assert.EqualValues(t, expected, *stats.AvailableBytes, label+".Mem.AvailableBytes")
|
||||
}
|
||||
}
|
||||
|
||||
func checkFsStats(t *testing.T, label string, seed int, stats *statsapi.FsStats) {
|
||||
assert.EqualValues(t, seed+offsetFsCapacity, *stats.CapacityBytes, label+".CapacityBytes")
|
||||
assert.EqualValues(t, seed+offsetFsAvailable, *stats.AvailableBytes, label+".AvailableBytes")
|
||||
assert.EqualValues(t, seed+offsetFsInodes, *stats.Inodes, label+".Inodes")
|
||||
assert.EqualValues(t, seed+offsetFsInodesFree, *stats.InodesFree, label+".InodesFree")
|
||||
}
|
||||
|
||||
// fakeResourceAnalyzer is a test stub: Start does nothing, Get returns no
// summary, and GetPodVolumeStats returns the canned podVolumeStats for any
// UID with found=true.
type fakeResourceAnalyzer struct {
	// podVolumeStats is returned unconditionally by GetPodVolumeStats.
	podVolumeStats serverstats.PodVolumeStats
}

// Start is a no-op; the fake performs no background collection.
func (o *fakeResourceAnalyzer) Start() {}

// Get always returns a nil summary and no error.
func (o *fakeResourceAnalyzer) Get() (*statsapi.Summary, error) { return nil, nil }

// GetPodVolumeStats ignores uid and reports the canned stats as found.
func (o *fakeResourceAnalyzer) GetPodVolumeStats(uid types.UID) (serverstats.PodVolumeStats, bool) {
	return o.podVolumeStats, true
}
|
||||
|
||||
// fakeContainerStatsProvider is a stub container-stats provider whose methods
// always return an error; tests using it are not expected to reach the
// container-stats code path.
type fakeContainerStatsProvider struct {
}

// ListPodStats always fails; pod-level stats are not exercised by these tests.
func (p fakeContainerStatsProvider) ListPodStats() ([]statsapi.PodStats, error) {
	return nil, fmt.Errorf("not implemented")
}

// ImageFsStats always fails; image fs stats are not exercised by these tests.
func (p fakeContainerStatsProvider) ImageFsStats() (*statsapi.FsStats, error) {
	return nil, fmt.Errorf("not implemented")
}
|
Loading…
Reference in New Issue