Merge pull request #74441 from Random-Liu/pod-log-directory

Add namespace and name into the CRI pod log directory
Kubernetes Prow Robot 2019-03-08 20:43:30 -08:00 committed by GitHub
commit b150560107
16 changed files with 237 additions and 105 deletions
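
For orientation before the per-file diffs: this PR moves pod log directories from /var/log/pods/<UID> to /var/log/pods/<NAMESPACE>_<NAME>_<UID>, keeps the per-container layout (<container name>/<restart count>.log) underneath, and recovers the UID from either form as the last underscore-delimited segment. The standalone sketch below is an illustration, not the kubelet code itself; the default/nginx pod and UID 12345678 are hypothetical, while the root directory and delimiter mirror the constants used in the diffs.

// Sketch of the path scheme introduced by this PR, mirroring the
// BuildPodLogsDirectory, BuildContainerLogsDirectory and
// parsePodUIDFromLogsDirectory helpers in the diffs below.
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const (
	podLogsRootDirectory = "/var/log/pods" // kubelet's pod log root
	logPathDelimiter     = "_"
)

// buildPodLogsDirectory returns /var/log/pods/NAMESPACE_NAME_UID.
func buildPodLogsDirectory(namespace, name, uid string) string {
	return filepath.Join(podLogsRootDirectory,
		strings.Join([]string{namespace, name, uid}, logPathDelimiter))
}

// buildContainerLogsDirectory returns the per-container directory inside the pod directory.
func buildContainerLogsDirectory(namespace, name, uid, container string) string {
	return filepath.Join(buildPodLogsDirectory(namespace, name, uid), container)
}

// parsePodUIDFromLogsDirectory handles both the old layout (UID) and the
// new layout (NAMESPACE_NAME_UID): the UID is always the last segment.
func parsePodUIDFromLogsDirectory(dirName string) string {
	parts := strings.Split(dirName, logPathDelimiter)
	return parts[len(parts)-1]
}

func main() {
	fmt.Println(buildContainerLogsDirectory("default", "nginx", "12345678", "nginx"))
	// /var/log/pods/default_nginx_12345678/nginx
	fmt.Println(parsePodUIDFromLogsDirectory("default_nginx_12345678")) // 12345678
	fmt.Println(parsePodUIDFromLogsDirectory("12345678"))               // 12345678 (old layout)
}

The "_" delimiter is unambiguous because namespaces and pod names are DNS-1123 labels/subdomains and cannot contain underscores, so the UID is always the final segment.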


@ -711,7 +711,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.runtimeCache,
runtimeService,
imageService,
stats.NewLogMetricsService())
stats.NewLogMetricsService(),
kubecontainer.RealOS{})
}
klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{})


@ -163,24 +163,31 @@ func getStableKey(pod *v1.Pod, container *v1.Container) string {
return fmt.Sprintf("%s_%s_%s_%s_%s", pod.Name, pod.Namespace, string(pod.UID), container.Name, hash)
}
// logPathDelimiter is the delimiter used in the log path.
const logPathDelimiter = "_"
// buildContainerLogsPath builds log path for container relative to pod logs directory.
func buildContainerLogsPath(containerName string, restartCount int) string {
return filepath.Join(containerName, fmt.Sprintf("%d.log", restartCount))
}
// buildFullContainerLogsPath builds absolute log path for container.
func buildFullContainerLogsPath(podUID types.UID, containerName string, restartCount int) string {
return filepath.Join(buildPodLogsDirectory(podUID), buildContainerLogsPath(containerName, restartCount))
}
// BuildContainerLogsDirectory builds absolute log directory path for a container in pod.
func BuildContainerLogsDirectory(podUID types.UID, containerName string) string {
return filepath.Join(buildPodLogsDirectory(podUID), containerName)
func BuildContainerLogsDirectory(podNamespace, podName string, podUID types.UID, containerName string) string {
return filepath.Join(BuildPodLogsDirectory(podNamespace, podName, podUID), containerName)
}
// buildPodLogsDirectory builds absolute log directory path for a pod sandbox.
func buildPodLogsDirectory(podUID types.UID) string {
return filepath.Join(podLogsRootDirectory, string(podUID))
// BuildPodLogsDirectory builds absolute log directory path for a pod sandbox.
func BuildPodLogsDirectory(podNamespace, podName string, podUID types.UID) string {
return filepath.Join(podLogsRootDirectory, strings.Join([]string{podNamespace, podName,
string(podUID)}, logPathDelimiter))
}
// parsePodUIDFromLogsDirectory parses pod logs directory name and returns the pod UID.
// It supports both the old pod log directory /var/log/pods/UID, and the new pod log
// directory /var/log/pods/NAMESPACE_NAME_UID.
func parsePodUIDFromLogsDirectory(name string) types.UID {
parts := strings.Split(name, logPathDelimiter)
return types.UID(parts[len(parts)-1])
}
// toKubeRuntimeStatus converts the runtimeapi.RuntimeStatus to kubecontainer.RuntimeStatus.


@ -205,7 +205,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
}
command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
logDir := BuildContainerLogsDirectory(kubetypes.UID(pod.UID), container.Name)
logDir := BuildContainerLogsDirectory(pod.Namespace, pod.Name, pod.UID, container.Name)
err = m.osInterface.MkdirAll(logDir, 0755)
if err != nil {
return nil, cleanupAction, fmt.Errorf("create container log directory for container %s failed: %v", container.Name, err)
@ -402,7 +402,6 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
if status.State == runtimeapi.ContainerState_CONTAINER_EXITED {
// Populate the termination message if needed.
annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
labeledInfo := getContainerInfoFromLabels(status.Labels)
fallbackToLogs := annotatedInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError && cStatus.ExitCode != 0
tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs)
if checkLogs {
@ -413,8 +412,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
tMessage = fmt.Sprintf("Error reading termination message from logs: %v", err)
}
} else {
path := buildFullContainerLogsPath(uid, labeledInfo.ContainerName, annotatedInfo.RestartCount)
tMessage = m.readLastStringFromContainerLogs(path)
tMessage = m.readLastStringFromContainerLogs(status.GetLogPath())
}
}
// Use the termination message written by the application if it is not empty
@ -824,8 +822,7 @@ func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error
return fmt.Errorf("failed to get container status %q: %v", containerID, err)
}
labeledInfo := getContainerInfoFromLabels(status.Labels)
annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
path := buildFullContainerLogsPath(labeledInfo.PodUID, labeledInfo.ContainerName, annotatedInfo.RestartCount)
path := status.GetLogPath()
if err := m.osInterface.Remove(path); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove container %q log %q: %v", containerID, path, err)
}


@ -61,7 +61,7 @@ func TestRemoveContainer(t *testing.T) {
err = m.removeContainer(containerID)
assert.NoError(t, err)
// Verify container log is removed
expectedContainerLogPath := filepath.Join(podLogsRootDirectory, "12345678", "foo", "0.log")
expectedContainerLogPath := filepath.Join(podLogsRootDirectory, "new_bar_12345678", "foo", "0.log")
expectedContainerLogSymlink := legacyLogSymlink(containerID, "foo", "bar", "new")
assert.Equal(t, fakeOS.Removes, []string{expectedContainerLogPath, expectedContainerLogSymlink})
// Verify container is removed


@ -339,7 +339,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
}
for _, dir := range dirs {
name := dir.Name()
podUID := types.UID(name)
podUID := parsePodUIDFromLogsDirectory(name)
if !cgc.podStateProvider.IsPodDeleted(podUID) {
continue
}


@ -412,10 +412,16 @@ func TestPodLogDirectoryGC(t *testing.T) {
// pod log directories without corresponding pods should be removed.
podStateProvider.existingPods["123"] = struct{}{}
podStateProvider.existingPods["456"] = struct{}{}
podStateProvider.existingPods["321"] = struct{}{}
podStateProvider.runningPods["123"] = struct{}{}
podStateProvider.runningPods["456"] = struct{}{}
files := []string{"123", "456", "789", "012"}
removed := []string{filepath.Join(podLogsRootDirectory, "789"), filepath.Join(podLogsRootDirectory, "012")}
podStateProvider.existingPods["321"] = struct{}{}
files := []string{"123", "456", "789", "012", "name_namespace_321", "name_namespace_654"}
removed := []string{
filepath.Join(podLogsRootDirectory, "789"),
filepath.Join(podLogsRootDirectory, "012"),
filepath.Join(podLogsRootDirectory, "name_namespace_654"),
}
ctrl := gomock.NewController(t)
defer ctrl.Finish()


@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime
import (
"path/filepath"
"reflect"
"sort"
"testing"
@ -158,6 +159,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
State: template.state,
Labels: containerConfig.Labels,
Annotations: containerConfig.Annotations,
LogPath: filepath.Join(sandboxConfig.GetLogDirectory(), containerConfig.GetLogPath()),
},
SandboxID: podSandboxID,
}


@ -103,7 +103,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
podSandboxConfig.Hostname = hostname
}
logDir := buildPodLogsDirectory(pod.UID)
logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID)
podSandboxConfig.LogDirectory = logDir
portMappings := []*runtimeapi.PortMapping{}


@ -44,7 +44,7 @@ func TestCreatePodSandbox(t *testing.T) {
fakeOS := m.osInterface.(*containertest.FakeOS)
fakeOS.MkdirAllFn = func(path string, perm os.FileMode) error {
// Check pod logs root directory is created.
assert.Equal(t, filepath.Join(podLogsRootDirectory, "12345678"), path)
assert.Equal(t, filepath.Join(podLogsRootDirectory, pod.Namespace+"_"+pod.Name+"_12345678"), path)
assert.Equal(t, os.FileMode(0755), perm)
return nil
}


@ -89,6 +89,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/golang/mock/gomock:go_default_library",
"//vendor/github.com/google/cadvisor/fs:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/google/cadvisor/info/v2:go_default_library",


@ -135,7 +135,7 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) {
copy(ephemeralStats, vstats.EphemeralVolumes)
podStats.VolumeStats = append(vstats.EphemeralVolumes, vstats.PersistentVolumes...)
}
podStats.EphemeralStorage = calcEphemeralStorage(podStats.Containers, ephemeralStats, &rootFsInfo)
podStats.EphemeralStorage = calcEphemeralStorage(podStats.Containers, ephemeralStats, &rootFsInfo, nil, false)
// Lookup the pod-level cgroup's CPU and memory stats
podInfo := getCadvisorPodInfoFromPodUID(podUID, allInfos)
if podInfo != nil {
@ -225,53 +225,6 @@ func (p *cadvisorStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats,
return result, nil
}
func calcEphemeralStorage(containers []statsapi.ContainerStats, volumes []statsapi.VolumeStats, rootFsInfo *cadvisorapiv2.FsInfo) *statsapi.FsStats {
result := &statsapi.FsStats{
Time: metav1.NewTime(rootFsInfo.Timestamp),
AvailableBytes: &rootFsInfo.Available,
CapacityBytes: &rootFsInfo.Capacity,
InodesFree: rootFsInfo.InodesFree,
Inodes: rootFsInfo.Inodes,
}
for _, container := range containers {
addContainerUsage(result, &container)
}
for _, volume := range volumes {
result.UsedBytes = addUsage(result.UsedBytes, volume.FsStats.UsedBytes)
result.InodesUsed = addUsage(result.InodesUsed, volume.InodesUsed)
result.Time = maxUpdateTime(&result.Time, &volume.FsStats.Time)
}
return result
}
func addContainerUsage(stat *statsapi.FsStats, container *statsapi.ContainerStats) {
if rootFs := container.Rootfs; rootFs != nil {
stat.Time = maxUpdateTime(&stat.Time, &rootFs.Time)
stat.InodesUsed = addUsage(stat.InodesUsed, rootFs.InodesUsed)
stat.UsedBytes = addUsage(stat.UsedBytes, rootFs.UsedBytes)
if logs := container.Logs; logs != nil {
stat.UsedBytes = addUsage(stat.UsedBytes, logs.UsedBytes)
stat.Time = maxUpdateTime(&stat.Time, &logs.Time)
}
}
}
func maxUpdateTime(first, second *metav1.Time) metav1.Time {
if first.Before(second) {
return *second
}
return *first
}
func addUsage(first, second *uint64) *uint64 {
if first == nil {
return second
} else if second == nil {
return first
}
total := *first + *second
return &total
}
// ImageFsStats returns the stats of the filesystem for storing images.
func (p *cadvisorStatsProvider) ImageFsStats() (*statsapi.FsStats, error) {
imageFsInfo, err := p.cadvisor.ImagesFsInfo()


@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"path"
"path/filepath"
"sort"
"strings"
"sync"
@ -34,6 +35,7 @@ import (
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@ -66,6 +68,8 @@ type criStatsProvider struct {
imageService internalapi.ImageManagerService
// logMetrics provides the metrics for container logs
logMetricsService LogMetricsService
// osInterface is the interface for syscalls.
osInterface kubecontainer.OSInterface
// cpuUsageCache caches the cpu usage for containers.
cpuUsageCache map[string]*cpuUsageRecord
@ -80,6 +84,7 @@ func newCRIStatsProvider(
runtimeService internalapi.RuntimeService,
imageService internalapi.ImageManagerService,
logMetricsService LogMetricsService,
osInterface kubecontainer.OSInterface,
) containerStatsProvider {
return &criStatsProvider{
cadvisor: cadvisor,
@ -87,6 +92,7 @@ func newCRIStatsProvider(
runtimeService: runtimeService,
imageService: imageService,
logMetricsService: logMetricsService,
osInterface: osInterface,
cpuUsageCache: make(map[string]*cpuUsageRecord),
}
}
@ -189,7 +195,7 @@ func (p *criStatsProvider) listPodStats(updateCPUNanoCoreUsage bool) ([]statsapi
}
// Fill available stats for full set of required pod stats
cs := p.makeContainerStats(stats, container, &rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata().GetUid(), updateCPUNanoCoreUsage)
cs := p.makeContainerStats(stats, container, &rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(), updateCPUNanoCoreUsage)
p.addPodNetworkStats(ps, podSandboxID, caInfos, cs, containerNetworkStats[podSandboxID])
p.addPodCPUMemoryStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs)
@ -384,15 +390,27 @@ func buildPodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats {
}
}
func (p *criStatsProvider) makePodStorageStats(s *statsapi.PodStats, rootFsInfo *cadvisorapiv2.FsInfo) *statsapi.PodStats {
func (p *criStatsProvider) makePodStorageStats(s *statsapi.PodStats, rootFsInfo *cadvisorapiv2.FsInfo) {
podNs := s.PodRef.Namespace
podName := s.PodRef.Name
podUID := types.UID(s.PodRef.UID)
if vstats, found := p.resourceAnalyzer.GetPodVolumeStats(podUID); found {
ephemeralStats := make([]statsapi.VolumeStats, len(vstats.EphemeralVolumes))
copy(ephemeralStats, vstats.EphemeralVolumes)
s.VolumeStats = append(vstats.EphemeralVolumes, vstats.PersistentVolumes...)
s.EphemeralStorage = calcEphemeralStorage(s.Containers, ephemeralStats, rootFsInfo)
vstats, found := p.resourceAnalyzer.GetPodVolumeStats(podUID)
if !found {
return
}
return s
podLogDir := kuberuntime.BuildPodLogsDirectory(podNs, podName, podUID)
logStats, err := p.getPodLogStats(podLogDir, rootFsInfo)
if err != nil {
klog.Errorf("Unable to fetch pod log stats for path %s: %v ", podLogDir, err)
// If people do in-place upgrade, there might be pods still using
// the old log path. For those pods, no pod log stats is returned.
// We should continue generating other stats in that case.
// calcEphemeralStorage tolerates logStats == nil.
}
ephemeralStats := make([]statsapi.VolumeStats, len(vstats.EphemeralVolumes))
copy(ephemeralStats, vstats.EphemeralVolumes)
s.VolumeStats = append(vstats.EphemeralVolumes, vstats.PersistentVolumes...)
s.EphemeralStorage = calcEphemeralStorage(s.Containers, ephemeralStats, rootFsInfo, logStats, true)
}
func (p *criStatsProvider) addPodNetworkStats(
@ -476,7 +494,7 @@ func (p *criStatsProvider) makeContainerStats(
container *runtimeapi.Container,
rootFsInfo *cadvisorapiv2.FsInfo,
fsIDtoInfo map[runtimeapi.FilesystemIdentifier]*cadvisorapiv2.FsInfo,
uid string,
meta *runtimeapi.PodSandboxMetadata,
updateCPUNanoCoreUsage bool,
) *statsapi.ContainerStats {
result := &statsapi.ContainerStats{
@ -504,8 +522,8 @@ func (p *criStatsProvider) makeContainerStats(
}
} else {
result.CPU.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano()))
result.CPU.UsageCoreNanoSeconds = Uint64Ptr(0)
result.CPU.UsageNanoCores = Uint64Ptr(0)
result.CPU.UsageCoreNanoSeconds = uint64Ptr(0)
result.CPU.UsageNanoCores = uint64Ptr(0)
}
if stats.Memory != nil {
result.Memory.Time = metav1.NewTime(time.Unix(0, stats.Memory.Timestamp))
@ -514,7 +532,7 @@ func (p *criStatsProvider) makeContainerStats(
}
} else {
result.Memory.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano()))
result.Memory.WorkingSetBytes = Uint64Ptr(0)
result.Memory.WorkingSetBytes = uint64Ptr(0)
}
if stats.WritableLayer != nil {
result.Rootfs.Time = metav1.NewTime(time.Unix(0, stats.WritableLayer.Timestamp))
@ -543,8 +561,18 @@ func (p *criStatsProvider) makeContainerStats(
result.Rootfs.Inodes = imageFsInfo.Inodes
}
}
containerLogPath := kuberuntime.BuildContainerLogsDirectory(types.UID(uid), container.GetMetadata().GetName())
result.Logs = p.getContainerLogStats(containerLogPath, rootFsInfo)
// NOTE: This doesn't support the old pod log path, `/var/log/pods/UID`. For containers
// using old log path, empty log stats are returned. This is fine, because we don't
// officially support in-place upgrade anyway.
var (
containerLogPath = kuberuntime.BuildContainerLogsDirectory(meta.GetNamespace(),
meta.GetName(), types.UID(meta.GetUid()), container.GetMetadata().GetName())
err error
)
result.Logs, err = p.getPathFsStats(containerLogPath, rootFsInfo)
if err != nil {
klog.Errorf("Unable to fetch container log stats for path %s: %v ", containerLogPath, err)
}
return result
}
@ -572,8 +600,8 @@ func (p *criStatsProvider) makeContainerCPUAndMemoryStats(
}
} else {
result.CPU.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano()))
result.CPU.UsageCoreNanoSeconds = Uint64Ptr(0)
result.CPU.UsageNanoCores = Uint64Ptr(0)
result.CPU.UsageCoreNanoSeconds = uint64Ptr(0)
result.CPU.UsageNanoCores = uint64Ptr(0)
}
if stats.Memory != nil {
result.Memory.Time = metav1.NewTime(time.Unix(0, stats.Memory.Timestamp))
@ -582,7 +610,7 @@ func (p *criStatsProvider) makeContainerCPUAndMemoryStats(
}
} else {
result.Memory.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano()))
result.Memory.WorkingSetBytes = Uint64Ptr(0)
result.Memory.WorkingSetBytes = uint64Ptr(0)
}
return result
@ -735,13 +763,11 @@ func getCRICadvisorStats(infos map[string]cadvisorapiv2.ContainerInfo) map[strin
return stats
}
// TODO Cache the metrics in container log manager
func (p *criStatsProvider) getContainerLogStats(path string, rootFsInfo *cadvisorapiv2.FsInfo) *statsapi.FsStats {
func (p *criStatsProvider) getPathFsStats(path string, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
m := p.logMetricsService.createLogMetricsProvider(path)
logMetrics, err := m.GetMetrics()
if err != nil {
klog.Errorf("Unable to fetch container log stats for path %s: %v ", path, err)
return nil
return nil, err
}
result := &statsapi.FsStats{
Time: metav1.NewTime(rootFsInfo.Timestamp),
@ -754,5 +780,40 @@ func (p *criStatsProvider) getContainerLogStats(path string, rootFsInfo *cadviso
result.UsedBytes = &usedbytes
inodesUsed := uint64(logMetrics.InodesUsed.Value())
result.InodesUsed = &inodesUsed
return result
result.Time = maxUpdateTime(&result.Time, &logMetrics.Time)
return result, nil
}
// getPodLogStats gets stats for logs under the pod log directory. Container logs usually exist
// under the container log directory. However, for some container runtimes, e.g. kata, gvisor,
// they may want to keep some pod level logs, in that case they can put those logs directly under
// the pod log directory. And kubelet will take those logs into account as part of pod ephemeral
// storage.
func (p *criStatsProvider) getPodLogStats(path string, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
files, err := p.osInterface.ReadDir(path)
if err != nil {
return nil, err
}
result := &statsapi.FsStats{
Time: metav1.NewTime(rootFsInfo.Timestamp),
AvailableBytes: &rootFsInfo.Available,
CapacityBytes: &rootFsInfo.Capacity,
InodesFree: rootFsInfo.InodesFree,
Inodes: rootFsInfo.Inodes,
}
for _, f := range files {
if f.IsDir() {
continue
}
// Only include *files* under pod log directory.
fpath := filepath.Join(path, f.Name())
fstats, err := p.getPathFsStats(fpath, rootFsInfo)
if err != nil {
return nil, fmt.Errorf("failed to get fsstats for %q: %v", fpath, err)
}
result.UsedBytes = addUsage(result.UsedBytes, fstats.UsedBytes)
result.InodesUsed = addUsage(result.InodesUsed, fstats.InodesUsed)
result.Time = maxUpdateTime(&result.Time, &fstats.Time)
}
return result, nil
}


@ -18,10 +18,13 @@ package stats
import (
"math/rand"
"os"
"path/filepath"
"runtime"
"testing"
"time"
gomock "github.com/golang/mock/gomock"
cadvisorfs "github.com/google/cadvisor/fs"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/stretchr/testify/assert"
@ -110,6 +113,11 @@ func TestCRIListPodStats(t *testing.T) {
container5 = makeFakeContainer(sandbox3, cName5, 0, true)
containerStats5 = makeFakeContainerStats(container5, imageFsMountpoint)
containerLogStats5 = makeFakeLogStats(5000)
podLogName0 = "pod-log-0"
podLogName1 = "pod-log-1"
podLogStats0 = makeFakeLogStats(5000)
podLogStats1 = makeFakeLogStats(6000)
)
var (
@ -166,14 +174,36 @@ func TestCRIListPodStats(t *testing.T) {
}
fakeLogStats := map[string]*volume.Metrics{
kuberuntime.BuildContainerLogsDirectory(types.UID("sandbox0-uid"), cName0): containerLogStats0,
kuberuntime.BuildContainerLogsDirectory(types.UID("sandbox0-uid"), cName1): containerLogStats1,
kuberuntime.BuildContainerLogsDirectory(types.UID("sandbox1-uid"), cName2): containerLogStats2,
kuberuntime.BuildContainerLogsDirectory(types.UID("sandbox2-uid"), cName3): containerLogStats4,
kuberuntime.BuildContainerLogsDirectory(types.UID("sandbox3-uid"), cName5): containerLogStats5,
kuberuntime.BuildContainerLogsDirectory("sandbox0-ns", "sandbox0-name", types.UID("sandbox0-uid"), cName0): containerLogStats0,
kuberuntime.BuildContainerLogsDirectory("sandbox0-ns", "sandbox0-name", types.UID("sandbox0-uid"), cName1): containerLogStats1,
kuberuntime.BuildContainerLogsDirectory("sandbox1-ns", "sandbox1-name", types.UID("sandbox1-uid"), cName2): containerLogStats2,
kuberuntime.BuildContainerLogsDirectory("sandbox2-ns", "sandbox2-name", types.UID("sandbox2-uid"), cName3): containerLogStats4,
kuberuntime.BuildContainerLogsDirectory("sandbox3-ns", "sandbox3-name", types.UID("sandbox3-uid"), cName5): containerLogStats5,
filepath.Join(kuberuntime.BuildPodLogsDirectory("sandbox0-ns", "sandbox0-name", types.UID("sandbox0-uid")), podLogName0): podLogStats0,
filepath.Join(kuberuntime.BuildPodLogsDirectory("sandbox1-ns", "sandbox1-name", types.UID("sandbox1-uid")), podLogName1): podLogStats1,
}
fakeLogStatsProvider := NewFakeLogMetricsService(fakeLogStats)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
fakeOS := &kubecontainertest.FakeOS{}
fakeOS.ReadDirFn = func(path string) ([]os.FileInfo, error) {
var fileInfos []os.FileInfo
mockFI := kubecontainertest.NewMockFileInfo(ctrl)
switch path {
case kuberuntime.BuildPodLogsDirectory("sandbox0-ns", "sandbox0-name", types.UID("sandbox0-uid")):
mockFI.EXPECT().Name().Return(podLogName0)
case kuberuntime.BuildPodLogsDirectory("sandbox1-ns", "sandbox1-name", types.UID("sandbox1-uid")):
mockFI.EXPECT().Name().Return(podLogName1)
default:
return nil, nil
}
mockFI.EXPECT().IsDir().Return(false)
fileInfos = append(fileInfos, mockFI)
return fileInfos, nil
}
provider := NewCRIStatsProvider(
mockCadvisor,
resourceAnalyzer,
@ -182,6 +212,7 @@ func TestCRIListPodStats(t *testing.T) {
fakeRuntimeService,
fakeImageService,
fakeLogStatsProvider,
fakeOS,
)
stats, err := provider.ListPodStats()
@ -199,7 +230,7 @@ func TestCRIListPodStats(t *testing.T) {
assert.Equal(2, len(p0.Containers))
checkEphemeralStorageStats(assert, p0, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats0, containerStats1},
[]*volume.Metrics{containerLogStats0, containerLogStats1})
[]*volume.Metrics{containerLogStats0, containerLogStats1}, podLogStats0)
containerStatsMap := make(map[string]statsapi.ContainerStats)
for _, s := range p0.Containers {
@ -223,7 +254,8 @@ func TestCRIListPodStats(t *testing.T) {
assert.Equal(sandbox1.CreatedAt, p1.StartTime.UnixNano())
assert.Equal(1, len(p1.Containers))
checkEphemeralStorageStats(assert, p1, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats2}, []*volume.Metrics{containerLogStats2})
checkEphemeralStorageStats(assert, p1, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats2},
[]*volume.Metrics{containerLogStats2}, podLogStats1)
c2 := p1.Containers[0]
assert.Equal(cName2, c2.Name)
assert.Equal(container2.CreatedAt, c2.StartTime.UnixNano())
@ -237,7 +269,8 @@ func TestCRIListPodStats(t *testing.T) {
assert.Equal(sandbox2.CreatedAt, p2.StartTime.UnixNano())
assert.Equal(1, len(p2.Containers))
checkEphemeralStorageStats(assert, p2, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats4}, []*volume.Metrics{containerLogStats4})
checkEphemeralStorageStats(assert, p2, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats4},
[]*volume.Metrics{containerLogStats4}, nil)
c3 := p2.Containers[0]
assert.Equal(cName3, c3.Name)
@ -352,6 +385,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
fakeRuntimeService,
nil,
nil,
&kubecontainertest.FakeOS{},
)
stats, err := provider.ListPodCPUAndMemoryStats()
@ -471,6 +505,7 @@ func TestCRIImagesFsStats(t *testing.T) {
fakeRuntimeService,
fakeImageService,
fakeLogStatsProvider,
&kubecontainertest.FakeOS{},
)
stats, err := provider.ImageFsStats()
@ -637,7 +672,8 @@ func checkEphemeralStorageStats(assert *assert.Assertions,
actual statsapi.PodStats,
volumes []statsapi.VolumeStats,
containers []*runtimeapi.ContainerStats,
containerLogStats []*volume.Metrics) {
containerLogStats []*volume.Metrics,
podLogStats *volume.Metrics) {
var totalUsed, inodesUsed uint64
for _, container := range containers {
totalUsed = totalUsed + container.WritableLayer.UsedBytes.Value
@ -651,6 +687,12 @@ func checkEphemeralStorageStats(assert *assert.Assertions,
for _, logStats := range containerLogStats {
totalUsed = totalUsed + uint64(logStats.Used.Value())
inodesUsed = inodesUsed + uint64(logStats.InodesUsed.Value())
}
if podLogStats != nil {
totalUsed = totalUsed + uint64(podLogStats.Used.Value())
inodesUsed = inodesUsed + uint64(podLogStats.InodesUsed.Value())
}
assert.Equal(int(totalUsed), int(*actual.EphemeralStorage.UsedBytes))


@ -330,6 +330,64 @@ func getUint64Value(value *uint64) uint64 {
return *value
}
func Uint64Ptr(i uint64) *uint64 {
func uint64Ptr(i uint64) *uint64 {
return &i
}
func calcEphemeralStorage(containers []statsapi.ContainerStats, volumes []statsapi.VolumeStats, rootFsInfo *cadvisorapiv2.FsInfo,
podLogStats *statsapi.FsStats, isCRIStatsProvider bool) *statsapi.FsStats {
result := &statsapi.FsStats{
Time: metav1.NewTime(rootFsInfo.Timestamp),
AvailableBytes: &rootFsInfo.Available,
CapacityBytes: &rootFsInfo.Capacity,
InodesFree: rootFsInfo.InodesFree,
Inodes: rootFsInfo.Inodes,
}
for _, container := range containers {
addContainerUsage(result, &container, isCRIStatsProvider)
}
for _, volume := range volumes {
result.UsedBytes = addUsage(result.UsedBytes, volume.FsStats.UsedBytes)
result.InodesUsed = addUsage(result.InodesUsed, volume.InodesUsed)
result.Time = maxUpdateTime(&result.Time, &volume.FsStats.Time)
}
if podLogStats != nil {
result.UsedBytes = addUsage(result.UsedBytes, podLogStats.UsedBytes)
result.InodesUsed = addUsage(result.InodesUsed, podLogStats.InodesUsed)
result.Time = maxUpdateTime(&result.Time, &podLogStats.Time)
}
return result
}
func addContainerUsage(stat *statsapi.FsStats, container *statsapi.ContainerStats, isCRIStatsProvider bool) {
if rootFs := container.Rootfs; rootFs != nil {
stat.Time = maxUpdateTime(&stat.Time, &rootFs.Time)
stat.InodesUsed = addUsage(stat.InodesUsed, rootFs.InodesUsed)
stat.UsedBytes = addUsage(stat.UsedBytes, rootFs.UsedBytes)
if logs := container.Logs; logs != nil {
stat.UsedBytes = addUsage(stat.UsedBytes, logs.UsedBytes)
// We have accurate container log inode usage for CRI stats provider.
if isCRIStatsProvider {
stat.InodesUsed = addUsage(stat.InodesUsed, logs.InodesUsed)
}
stat.Time = maxUpdateTime(&stat.Time, &logs.Time)
}
}
}
func maxUpdateTime(first, second *metav1.Time) metav1.Time {
if first.Before(second) {
return *second
}
return *first
}
func addUsage(first, second *uint64) *uint64 {
if first == nil {
return second
} else if second == nil {
return first
}
total := *first + *second
return &total
}
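
A small worked example of the accounting calcEphemeralStorage now performs (the byte counts are made up for illustration): each container's writable layer and log usage, the ephemeral volumes, and the new pod-level log usage all add into the pod's ephemeral storage figure; the isCRIStatsProvider flag additionally folds in container log inode usage, which only the CRI provider reports accurately.

// Simplified sketch of the ephemeral storage sum after this PR;
// statsapi types are reduced to plain uint64 fields and the numbers
// below are invented for the example.
package main

import "fmt"

type containerUsage struct {
	rootfsBytes uint64 // writable layer
	logBytes    uint64 // container log directory
}

func main() {
	containers := []containerUsage{
		{rootfsBytes: 100, logBytes: 10},
		{rootfsBytes: 200, logBytes: 20},
	}
	var ephemeralVolumeBytes uint64 = 50 // emptyDir-style volumes
	var podLogBytes uint64 = 5           // pod-level log files, newly counted by this PR

	var total uint64
	for _, c := range containers {
		total += c.rootfsBytes + c.logBytes
	}
	total += ephemeralVolumeBytes + podLogBytes

	fmt.Println(total) // 385
}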


@ -42,8 +42,10 @@ func NewCRIStatsProvider(
runtimeService internalapi.RuntimeService,
imageService internalapi.ImageManagerService,
logMetricsService LogMetricsService,
osInterface kubecontainer.OSInterface,
) *StatsProvider {
return newStatsProvider(cadvisor, podManager, runtimeCache, newCRIStatsProvider(cadvisor, resourceAnalyzer, runtimeService, imageService, logMetricsService))
return newStatsProvider(cadvisor, podManager, runtimeCache, newCRIStatsProvider(cadvisor, resourceAnalyzer,
runtimeService, imageService, logMetricsService, osInterface))
}
// NewCadvisorStatsProvider returns a containerStatsProvider that provides both


@ -157,10 +157,12 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
// get podID from created Pod
createdLogPod, err := podClient.Get(logPodName, metav1.GetOptions{})
podNs := createdLogPod.Namespace
podName := createdLogPod.Name
podID := string(createdLogPod.UID)
// build log cri file path
expectedCRILogFile := logCRIDir + "/" + podID + "/" + logContainerName + "/0.log"
expectedCRILogFile := logCRIDir + "/" + podNs + "_" + podName + "_" + podID + "/" + logContainerName + "/0.log"
logCRICheckPodName := "log-cri-check-" + string(uuid.NewUUID())
err = createAndWaitPod(makeLogCheckPod(logCRICheckPodName, logString, expectedCRILogFile))