Merge pull request #22316 from vishh/godeps-cadvisor

Auto commit by PR queue bot
pull/6/head
k8s-merge-robot 2016-03-02 05:12:01 -08:00
commit 83f9c0d4fd
29 changed files with 537 additions and 208 deletions

72
Godeps/Godeps.json generated
View File

@ -578,93 +578,93 @@
},
{
"ImportPath": "github.com/google/cadvisor/api",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/cache/memory",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/collector",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/container",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/events",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/fs",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/healthz",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/http",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/info/v1",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/info/v2",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/manager",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/metrics",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/pages",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/storage",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/summary",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/utils",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/validate",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/cadvisor/version",
"Comment": "v0.21.1",
"Rev": "49a08d5139ae0111757a110ea3b55fe9a607873d"
"Comment": "v0.22.0",
"Rev": "e39fb02c89f2b39808bc1723108cde5bb510e102"
},
{
"ImportPath": "github.com/google/gofuzz",

View File

@ -1,8 +1,9 @@
{
"endpoint" : "http://localhost:8080/metrics",
"polling_frequency" : 10,
"polling_frequency" : 10,
"metrics_config" : [
"go_goroutines"
"go_goroutines",
"qps"
]
}

View File

@ -44,10 +44,14 @@ type collectorInfo struct {
// regular expressions for all metrics
regexps []*regexp.Regexp
// Limit for the number of scraped metrics. If the count is higher,
// no metrics will be returned.
metricCountLimit int
}
//Returns a new collector using the information extracted from the configfile
func NewCollector(collectorName string, configFile []byte) (*GenericCollector, error) {
func NewCollector(collectorName string, configFile []byte, metricCountLimit int) (*GenericCollector, error) {
var configInJSON Config
err := json.Unmarshal(configFile, &configInJSON)
if err != nil {
@ -83,12 +87,18 @@ func NewCollector(collectorName string, configFile []byte) (*GenericCollector, e
minPollFrequency = minSupportedFrequency
}
if len(configInJSON.MetricsConfig) > metricCountLimit {
return nil, fmt.Errorf("Too many metrics defined: %d limit: %d", len(configInJSON.MetricsConfig), metricCountLimit)
}
return &GenericCollector{
name: collectorName,
configFile: configInJSON,
info: &collectorInfo{
minPollingFrequency: minPollFrequency,
regexps: regexprs},
regexps: regexprs,
metricCountLimit: metricCountLimit,
},
}, nil
}
@ -134,6 +144,7 @@ func (collector *GenericCollector) Collect(metrics map[string][]v1.MetricVal) (t
}
var errorSlice []error
for ind, metricConfig := range collector.configFile.MetricsConfig {
matchString := collector.info.regexps[ind].FindStringSubmatch(string(pageContent))
if matchString != nil {

View File

@ -16,6 +16,7 @@ package collector
import (
"encoding/json"
"fmt"
"io/ioutil"
"math"
"net/http"
@ -38,10 +39,14 @@ type PrometheusCollector struct {
// the metrics to gather (uses a map as a set)
metricsSet map[string]bool
// Limit for the number of scraped metrics. If the count is higher,
// no metrics will be returned.
metricCountLimit int
}
//Returns a new collector using the information extracted from the configfile
func NewPrometheusCollector(collectorName string, configFile []byte) (*PrometheusCollector, error) {
func NewPrometheusCollector(collectorName string, configFile []byte, metricCountLimit int) (*PrometheusCollector, error) {
var configInJSON Prometheus
err := json.Unmarshal(configFile, &configInJSON)
if err != nil {
@ -57,6 +62,10 @@ func NewPrometheusCollector(collectorName string, configFile []byte) (*Prometheu
minPollingFrequency = minSupportedFrequency
}
if metricCountLimit < 0 {
return nil, fmt.Errorf("Metric count limit must be greater than 0")
}
var metricsSet map[string]bool
if len(configInJSON.MetricsConfig) > 0 {
metricsSet = make(map[string]bool, len(configInJSON.MetricsConfig))
@ -65,12 +74,17 @@ func NewPrometheusCollector(collectorName string, configFile []byte) (*Prometheu
}
}
if len(configInJSON.MetricsConfig) > metricCountLimit {
return nil, fmt.Errorf("Too many metrics defined: %d limit %d", len(configInJSON.MetricsConfig), metricCountLimit)
}
//TODO : Add checks for validity of config file (eg : Accurate JSON fields)
return &PrometheusCollector{
name: collectorName,
pollingFrequency: minPollingFrequency,
configFile: configInJSON,
metricsSet: metricsSet,
metricCountLimit: metricCountLimit,
}, nil
}
@ -148,6 +162,8 @@ func (collector *PrometheusCollector) Collect(metrics map[string][]v1.MetricVal)
var errorSlice []error
lines := strings.Split(string(pageContent), "\n")
newMetrics := make(map[string][]v1.MetricVal)
for _, line := range lines {
if line == "" {
break
@ -182,8 +198,15 @@ func (collector *PrometheusCollector) Collect(metrics map[string][]v1.MetricVal)
FloatValue: metVal,
Timestamp: currentTime,
}
metrics[metName] = append(metrics[metName], metric)
newMetrics[metName] = append(newMetrics[metName], metric)
if len(newMetrics) > collector.metricCountLimit {
return nextCollectionTime, nil, fmt.Errorf("too many metrics to collect")
}
}
}
for key, val := range newMetrics {
metrics[key] = append(metrics[key], val...)
}
return nextCollectionTime, metrics, compileErrors(errorSlice)
}

View File

@ -21,17 +21,14 @@ import (
"regexp"
"strconv"
"strings"
"sync"
"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/libcontainer"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/utils"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
var ArgDockerEndpoint = flag.String("docker", "unix:///var/run/docker.sock", "docker endpoint")
@ -46,9 +43,7 @@ var dockerRunDir = flag.String("docker_run", "/var/run/docker", "Absolute path t
// Regexp that identifies docker cgroups, containers started with
// --cgroup-parent have another prefix than 'docker'
var dockerCgroupRegexp = regexp.MustCompile(`.+-([a-z0-9]{64})\.scope$`)
var noSystemd = flag.Bool("nosystemd", false, "Explicitly disable systemd support for Docker containers")
var dockerCgroupRegexp = regexp.MustCompile(`([a-z0-9]{64})`)
var dockerEnvWhitelist = flag.String("docker_env_metadata_whitelist", "", "a comma-separated list of environment variable keys that needs to be collected for docker containers")
@ -58,36 +53,10 @@ func DockerStateDir() string {
return libcontainer.DockerStateDir(*dockerRootDir)
}
// Whether the system is using Systemd.
var useSystemd = false

// check guards the one-time systemd detection performed in UseSystemd.
var check = sync.Once{}

const (
	// Key in `docker info` output that holds the storage root directory.
	dockerRootDirKey = "Root Dir"
)

// UseSystemd reports whether systemd is controlling cgroups on this host.
// Detection runs exactly once (guarded by check): it is skipped entirely
// when the --nosystemd flag is set; otherwise it looks for a
// "system.slice" directory under the "name=systemd" or "cpu" cgroup
// mountpoints, whose presence indicates systemd is driving cgroup layout.
func UseSystemd() bool {
	check.Do(func() {
		if *noSystemd {
			return
		}
		// Check for system.slice in systemd and cpu cgroup.
		for _, cgroupType := range []string{"name=systemd", "cpu"} {
			mnt, err := cgroups.FindCgroupMountpoint(cgroupType)
			if err == nil {
				// systemd presence does not mean systemd controls cgroups.
				// If system.slice cgroup exists, then systemd is taking control.
				// This breaks if user creates system.slice manually :)
				if utils.FileExists(path.Join(mnt, "system.slice")) {
					useSystemd = true
					break
				}
			}
		}
	})
	return useSystemd
}
// RootDir returns the Docker root directory configured via the
// --docker_root flag.
func RootDir() string {
	root := *dockerRootDir
	return root
}
@ -117,6 +86,8 @@ type dockerFactory struct {
fsInfo fs.FsInfo
dockerVersion []int
ignoreMetrics container.MetricSet
}
func (self *dockerFactory) String() string {
@ -142,6 +113,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
inHostNamespace,
metadataEnvs,
self.dockerVersion,
self.ignoreMetrics,
)
return
}
@ -150,21 +122,15 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
func ContainerNameToDockerId(name string) string {
id := path.Base(name)
// Turn systemd cgroup name into Docker ID.
if UseSystemd() {
if matches := dockerCgroupRegexp.FindStringSubmatch(id); matches != nil {
id = matches[1]
}
if matches := dockerCgroupRegexp.FindStringSubmatch(id); matches != nil {
return matches[1]
}
return id
}
func isContainerName(name string) bool {
if UseSystemd() {
return dockerCgroupRegexp.MatchString(path.Base(name))
}
return true
return dockerCgroupRegexp.MatchString(path.Base(name))
}
// Docker handles all containers under /docker
@ -215,11 +181,7 @@ func parseDockerVersion(full_version_string string) ([]int, error) {
}
// Register root container before running this function!
func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
if UseSystemd() {
glog.Infof("System is using systemd")
}
func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
client, err := Client()
if err != nil {
return fmt.Errorf("unable to communicate with docker daemon: %v", err)
@ -277,7 +239,9 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
machineInfoFactory: factory,
storageDriver: storageDriver(sd),
storageDir: storageDir,
ignoreMetrics: ignoreMetrics,
}
container.RegisterContainerHandlerFactory(f)
return nil
}

View File

@ -36,6 +36,7 @@ type realFsHandler struct {
usageBytes uint64
baseUsageBytes uint64
period time.Duration
minPeriod time.Duration
rootfs string
extraDir string
fsInfo fs.FsInfo
@ -43,7 +44,11 @@ type realFsHandler struct {
stopChan chan struct{}
}
const longDu = time.Second
const (
longDu = time.Second
duTimeout = time.Minute
maxDuBackoffFactor = 20
)
var _ fsHandler = &realFsHandler{}
@ -53,6 +58,7 @@ func newFsHandler(period time.Duration, rootfs, extraDir string, fsInfo fs.FsInf
usageBytes: 0,
baseUsageBytes: 0,
period: period,
minPeriod: period,
rootfs: rootfs,
extraDir: extraDir,
fsInfo: fsInfo,
@ -60,20 +66,24 @@ func newFsHandler(period time.Duration, rootfs, extraDir string, fsInfo fs.FsInf
}
}
// needsUpdate reports whether the refresh period has elapsed since the
// last usage update, i.e. whether the cached figures are stale.
func (fh *realFsHandler) needsUpdate() bool {
	nextRefresh := fh.lastUpdate.Add(fh.period)
	return time.Now().After(nextRefresh)
}
func (fh *realFsHandler) update() error {
var (
baseUsage, extraDirUsage uint64
err error
)
// TODO(vishh): Add support for external mounts.
baseUsage, err := fh.fsInfo.GetDirUsage(fh.rootfs)
if err != nil {
return err
if fh.rootfs != "" {
baseUsage, err = fh.fsInfo.GetDirUsage(fh.rootfs, duTimeout)
if err != nil {
return err
}
}
extraDirUsage, err := fh.fsInfo.GetDirUsage(fh.extraDir)
if err != nil {
return err
if fh.extraDir != "" {
extraDirUsage, err = fh.fsInfo.GetDirUsage(fh.extraDir, duTimeout)
if err != nil {
return err
}
}
fh.Lock()
@ -93,11 +103,17 @@ func (fh *realFsHandler) trackUsage() {
case <-time.After(fh.period):
start := time.Now()
if err := fh.update(); err != nil {
glog.V(2).Infof("failed to collect filesystem stats - %v", err)
glog.Errorf("failed to collect filesystem stats - %v", err)
fh.period = fh.period * 2
if fh.period > maxDuBackoffFactor*fh.minPeriod {
fh.period = maxDuBackoffFactor * fh.minPeriod
}
} else {
fh.period = fh.minPeriod
}
duration := time.Since(start)
if duration > longDu {
glog.V(3).Infof("`du` on following dirs took %v: %v", duration, []string{fh.rootfs, fh.extraDir})
glog.V(2).Infof("`du` on following dirs took %v: %v", duration, []string{fh.rootfs, fh.extraDir})
}
}
}

View File

@ -81,6 +81,8 @@ type dockerContainerHandler struct {
// Filesystem handler.
fsHandler fsHandler
ignoreMetrics container.MetricSet
}
func getRwLayerID(containerID, storageDir string, sd storageDriver, dockerVersion []int) (string, error) {
@ -111,6 +113,7 @@ func newDockerContainerHandler(
inHostNamespace bool,
metadataEnvs []string,
dockerVersion []int,
ignoreMetrics container.MetricSet,
) (container.ContainerHandler, error) {
// Create the cgroup paths.
cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
@ -129,11 +132,13 @@ func newDockerContainerHandler(
rootFs := "/"
if !inHostNamespace {
rootFs = "/rootfs"
storageDir = path.Join(rootFs, storageDir)
}
id := ContainerNameToDockerId(name)
// Add the Containers dir where the log files are stored.
// FIXME: Give `otherStorageDir` a more descriptive name.
otherStorageDir := path.Join(storageDir, pathToContainersDir, id)
rwLayerID, err := getRwLayerID(id, storageDir, storageDriver, dockerVersion)
@ -159,8 +164,12 @@ func newDockerContainerHandler(
fsInfo: fsInfo,
rootFs: rootFs,
rootfsStorageDir: rootfsStorageDir,
fsHandler: newFsHandler(time.Minute, rootfsStorageDir, otherStorageDir, fsInfo),
envs: make(map[string]string),
ignoreMetrics: ignoreMetrics,
}
if !ignoreMetrics.Has(container.DiskUsageMetrics) {
handler.fsHandler = newFsHandler(time.Minute, rootfsStorageDir, otherStorageDir, fsInfo)
}
// We assume that if Inspect fails then the container is not known to docker.
@ -192,11 +201,15 @@ func newDockerContainerHandler(
func (self *dockerContainerHandler) Start() {
// Start the filesystem handler.
self.fsHandler.start()
if self.fsHandler != nil {
self.fsHandler.start()
}
}
func (self *dockerContainerHandler) Cleanup() {
self.fsHandler.stop()
if self.fsHandler != nil {
self.fsHandler.stop()
}
}
func (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) {
@ -253,8 +266,11 @@ func libcontainerConfigToContainerSpec(config *libcontainerconfigs.Config, mi *i
return spec
}
func hasNet(networkMode string) bool {
return !strings.HasPrefix(networkMode, "container:")
func (self *dockerContainerHandler) needNet() bool {
if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) {
return !strings.HasPrefix(self.networkMode, "container:")
}
return false
}
func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
@ -270,22 +286,25 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
spec := libcontainerConfigToContainerSpec(libcontainerConfig, mi)
spec.CreationTime = self.creationTime
switch self.storageDriver {
case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
spec.HasFilesystem = true
default:
spec.HasFilesystem = false
if !self.ignoreMetrics.Has(container.DiskUsageMetrics) {
switch self.storageDriver {
case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
spec.HasFilesystem = true
}
}
spec.Labels = self.labels
spec.Envs = self.envs
spec.Image = self.image
spec.HasNetwork = hasNet(self.networkMode)
spec.HasNetwork = self.needNet()
return spec, err
}
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
return nil
}
switch self.storageDriver {
case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
default:
@ -301,16 +320,21 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
if err != nil {
return err
}
var limit uint64 = 0
var (
limit uint64
fsType string
)
// Docker does not impose any filesystem limits for containers. So use capacity as limit.
for _, fs := range mi.Filesystems {
if fs.Device == deviceInfo.Device {
limit = fs.Capacity
fsType = fs.Type
break
}
}
fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}
fsStat := info.FsStats{Device: deviceInfo.Device, Type: fsType, Limit: limit}
fsStat.BaseUsage, fsStat.Usage = self.fsHandler.usage()
stats.Filesystem = append(stats.Filesystem, fsStat)
@ -320,7 +344,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
// TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.
func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
stats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid)
stats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics)
if err != nil {
return stats, err
}
@ -328,7 +352,7 @@ func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
// includes containers running in Kubernetes pods that use the network of the
// infrastructure container. This stops metrics being reported multiple times
// for each container in a pod.
if !hasNet(self.networkMode) {
if !self.needNet() {
stats.Network = info.NetworkStats{}
}

View File

@ -35,6 +35,35 @@ type ContainerHandlerFactory interface {
DebugInfo() map[string][]string
}
// MetricKind represents the kind of metrics that cAdvisor exposes.
type MetricKind string

// The full set of metric kinds cAdvisor can collect.
const (
	CpuUsageMetrics        MetricKind = "cpu"
	MemoryUsageMetrics     MetricKind = "memory"
	CpuLoadMetrics         MetricKind = "cpuLoad"
	DiskIOMetrics          MetricKind = "diskIO"
	DiskUsageMetrics       MetricKind = "disk"
	NetworkUsageMetrics    MetricKind = "network"
	NetworkTcpUsageMetrics MetricKind = "tcp"
	AppMetrics             MetricKind = "app"
)

// String returns the metric kind as a plain string.
func (mk MetricKind) String() string {
	return string(mk)
}

// MetricSet is a set of metric kinds, used e.g. to list metrics that
// collection should skip.
type MetricSet map[MetricKind]struct{}

// Has reports whether mk is a member of the set.
func (ms MetricSet) Has(mk MetricKind) bool {
	if _, found := ms[mk]; found {
		return true
	}
	return false
}

// Add inserts mk into the set; adding an existing member is a no-op.
func (ms MetricSet) Add(mk MetricKind) {
	ms[mk] = struct{}{}
}
// TODO(vmarmol): Consider not making this global.
// Global list of factories.
var (

View File

@ -17,12 +17,14 @@ package libcontainer
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/google/cadvisor/container"
info "github.com/google/cadvisor/info/v1"
"github.com/golang/glog"
@ -79,7 +81,7 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{
}
// Get cgroup and networking stats of the specified container
func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int) (*info.ContainerStats, error) {
func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) (*info.ContainerStats, error) {
cgroupStats, err := cgroupManager.GetStats()
if err != nil {
return nil, err
@ -91,27 +93,29 @@ func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int) (*info.Cont
// If we know the pid then get network stats from /proc/<pid>/net/dev
if pid > 0 {
netStats, err := networkStatsFromProc(rootFs, pid)
if err != nil {
glog.V(2).Infof("Unable to get network stats from pid %d: %v", pid, err)
} else {
stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
if !ignoreMetrics.Has(container.NetworkUsageMetrics) {
netStats, err := networkStatsFromProc(rootFs, pid)
if err != nil {
glog.V(2).Infof("Unable to get network stats from pid %d: %v", pid, err)
} else {
stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
}
}
if !ignoreMetrics.Has(container.NetworkTcpUsageMetrics) {
t, err := tcpStatsFromProc(rootFs, pid, "net/tcp")
if err != nil {
glog.V(2).Infof("Unable to get tcp stats from pid %d: %v", pid, err)
} else {
stats.Network.Tcp = t
}
// Commenting out to disable: too CPU intensive
/*t, err := tcpStatsFromProc(rootFs, pid, "net/tcp")
if err != nil {
glog.V(2).Infof("Unable to get tcp stats from pid %d: %v", pid, err)
} else {
stats.Network.Tcp = t
t6, err := tcpStatsFromProc(rootFs, pid, "net/tcp6")
if err != nil {
glog.V(2).Infof("Unable to get tcp6 stats from pid %d: %v", pid, err)
} else {
stats.Network.Tcp6 = t6
}
}
t6, err := tcpStatsFromProc(rootFs, pid, "net/tcp6")
if err != nil {
glog.V(2).Infof("Unable to get tcp6 stats from pid %d: %v", pid, err)
} else {
stats.Network.Tcp6 = t6
}*/
}
// For backwards compatibility.
@ -211,7 +215,6 @@ func setInterfaceStatValues(fields []string, pointers []*uint64) error {
return nil
}
/*
func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error) {
tcpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)
@ -286,7 +289,6 @@ func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {
return stats, nil
}
*/
func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
pids, err := cgroupManager.GetPids()

View File

@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build test
package container
import (

View File

@ -40,6 +40,9 @@ type rawFactory struct {
// Watcher for inotify events.
watcher *InotifyWatcher
// List of metrics to be ignored.
ignoreMetrics map[container.MetricKind]struct{}
}
func (self *rawFactory) String() string {
@ -51,7 +54,7 @@ func (self *rawFactory) NewContainerHandler(name string, inHostNamespace bool) (
if !inHostNamespace {
rootFs = "/rootfs"
}
return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs)
return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs, self.ignoreMetrics)
}
// The raw factory can handle any container. If --docker_only is set to false, non-docker containers are ignored.
@ -77,7 +80,7 @@ func (self *rawFactory) DebugInfo() map[string][]string {
return out
}
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics map[container.MetricKind]struct{}) error {
cgroupSubsystems, err := libcontainer.GetCgroupSubsystems()
if err != nil {
return fmt.Errorf("failed to get cgroup subsystems: %v", err)
@ -97,6 +100,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo) erro
fsInfo: fsInfo,
cgroupSubsystems: &cgroupSubsystems,
watcher: watcher,
ignoreMetrics: ignoreMetrics,
}
container.RegisterContainerHandlerFactory(factory)
return nil

View File

@ -57,16 +57,16 @@ type rawContainerHandler struct {
// Manager of this container's cgroups.
cgroupManager cgroups.Manager
// Whether this container has network isolation enabled.
hasNetwork bool
fsInfo fs.FsInfo
externalMounts []mount
rootFs string
// Metrics to be ignored.
ignoreMetrics container.MetricSet
}
func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *InotifyWatcher, rootFs string) (container.ContainerHandler, error) {
func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *InotifyWatcher, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
// Create the cgroup paths.
cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
for key, val := range cgroupSubsystems.MountPoints {
@ -86,15 +86,9 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
Paths: cgroupPaths,
}
hasNetwork := false
var externalMounts []mount
for _, container := range cHints.AllHosts {
if name == container.FullName {
/*libcontainerState.NetworkState = network.NetworkState{
VethHost: container.NetworkInterface.VethHost,
VethChild: container.NetworkInterface.VethChild,
}
hasNetwork = true*/
externalMounts = container.Mounts
break
}
@ -108,10 +102,10 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
cgroupPaths: cgroupPaths,
cgroupManager: cgroupManager,
fsInfo: fsInfo,
hasNetwork: hasNetwork,
externalMounts: externalMounts,
watcher: watcher,
rootFs: rootFs,
ignoreMetrics: ignoreMetrics,
}, nil
}
@ -207,6 +201,16 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
if utils.FileExists(cpuRoot) {
spec.HasCpu = true
spec.Cpu.Limit = readUInt64(cpuRoot, "cpu.shares")
spec.Cpu.Period = readUInt64(cpuRoot, "cpu.cfs_period_us")
quota := readString(cpuRoot, "cpu.cfs_quota_us")
if quota != "" && quota != "-1" {
val, err := strconv.ParseUint(quota, 10, 64)
if err != nil {
glog.Errorf("raw driver: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err)
}
spec.Cpu.Quota = val
}
}
}
@ -256,9 +260,6 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
spec.HasFilesystem = true
}
//Network
spec.HasNetwork = self.hasNetwork
// DiskIo.
if blkioRoot, ok := self.cgroupPaths["blkio"]; ok && utils.FileExists(blkioRoot) {
spec.HasDiskIo = true
@ -286,9 +287,11 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
stats.Filesystem = append(stats.Filesystem,
info.FsStats{
Device: fs.Device,
Type: fs.Type.String(),
Limit: fs.Capacity,
Usage: fs.Capacity - fs.Free,
Available: fs.Available,
InodesFree: fs.InodesFree,
ReadsCompleted: fs.DiskStats.ReadsCompleted,
ReadsMerged: fs.DiskStats.ReadsMerged,
SectorsRead: fs.DiskStats.SectorsRead,
@ -316,8 +319,10 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
stats.Filesystem = append(stats.Filesystem,
info.FsStats{
Device: fs.Device,
Type: fs.Type.String(),
Limit: fs.Capacity,
Usage: fs.Capacity - fs.Free,
InodesFree: fs.InodesFree,
ReadsCompleted: fs.DiskStats.ReadsCompleted,
ReadsMerged: fs.DiskStats.ReadsMerged,
SectorsRead: fs.DiskStats.SectorsRead,
@ -336,7 +341,7 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
}
func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
stats, err := libcontainer.GetStats(self.cgroupManager, self.rootFs, os.Getpid())
stats, err := libcontainer.GetStats(self.cgroupManager, self.rootFs, os.Getpid(), self.ignoreMetrics)
if err != nil {
return stats, err
}

View File

@ -21,6 +21,7 @@ import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
@ -29,6 +30,7 @@ import (
"strconv"
"strings"
"syscall"
"time"
"github.com/docker/docker/pkg/mount"
"github.com/golang/glog"
@ -114,7 +116,7 @@ func NewFsInfo(context Context) (FsInfo, error) {
// return any information or error, as we want to report based on the actual partition where the
// loopback file resides, inside of the loopback file itself.
func (self *RealFsInfo) getDockerDeviceMapperInfo(dockerInfo map[string]string) (string, *partition, error) {
if storageDriver, ok := dockerInfo["Driver"]; ok && storageDriver != "devicemapper" {
if storageDriver, ok := dockerInfo["Driver"]; ok && storageDriver != DeviceMapper.String() {
return "", nil, nil
}
@ -134,7 +136,7 @@ func (self *RealFsInfo) getDockerDeviceMapperInfo(dockerInfo map[string]string)
}
return dev, &partition{
fsType: "devicemapper",
fsType: DeviceMapper.String(),
major: major,
minor: minor,
blockSize: blockSize,
@ -244,27 +246,30 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
_, hasDevice := deviceSet[device]
if mountSet == nil || (hasMount && !hasDevice) {
var (
total, free, avail uint64
err error
err error
fs Fs
)
switch partition.fsType {
case "devicemapper":
total, free, avail, err = getDMStats(device, partition.blockSize)
case "zfs":
total, free, avail, err = getZfstats(device)
case DeviceMapper.String():
fs.Capacity, fs.Free, fs.Available, err = getDMStats(device, partition.blockSize)
fs.Type = DeviceMapper
case ZFS.String():
fs.Capacity, fs.Free, fs.Available, err = getZfstats(device)
fs.Type = ZFS
default:
total, free, avail, err = getVfsStats(partition.mountpoint)
fs.Capacity, fs.Free, fs.Available, fs.Inodes, fs.InodesFree, err = getVfsStats(partition.mountpoint)
fs.Type = VFS
}
if err != nil {
glog.Errorf("Stat fs failed. Error: %v", err)
} else {
deviceSet[device] = struct{}{}
deviceInfo := DeviceInfo{
fs.DeviceInfo = DeviceInfo{
Device: device,
Major: uint(partition.major),
Minor: uint(partition.minor),
}
fs := Fs{deviceInfo, total, free, avail, diskStatsMap[device]}
fs.DiskStats = diskStatsMap[device]
filesystems = append(filesystems, fs)
}
}
@ -355,27 +360,56 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
return nil, fmt.Errorf("could not find device with major: %d, minor: %d in cached partitions map", major, minor)
}
func (self *RealFsInfo) GetDirUsage(dir string) (uint64, error) {
out, err := exec.Command("nice", "-n", "19", "du", "-s", dir).CombinedOutput()
if err != nil {
return 0, fmt.Errorf("du command failed on %s with output %s - %s", dir, out, err)
func (self *RealFsInfo) GetDirUsage(dir string, timeout time.Duration) (uint64, error) {
if dir == "" {
return 0, fmt.Errorf("invalid directory")
}
usageInKb, err := strconv.ParseUint(strings.Fields(string(out))[0], 10, 64)
cmd := exec.Command("nice", "-n", "19", "du", "-s", dir)
stdoutp, err := cmd.StdoutPipe()
if err != nil {
return 0, fmt.Errorf("cannot parse 'du' output %s - %s", out, err)
return 0, fmt.Errorf("failed to setup stdout for cmd %v - %v", cmd.Args, err)
}
stderrp, err := cmd.StderrPipe()
if err != nil {
return 0, fmt.Errorf("failed to setup stderr for cmd %v - %v", cmd.Args, err)
}
if err := cmd.Start(); err != nil {
return 0, fmt.Errorf("failed to exec du - %v", err)
}
stdoutb, souterr := ioutil.ReadAll(stdoutp)
stderrb, _ := ioutil.ReadAll(stderrp)
timer := time.AfterFunc(timeout, func() {
glog.Infof("killing cmd %v due to timeout(%s)", cmd.Args, timeout.String())
cmd.Process.Kill()
})
err = cmd.Wait()
timer.Stop()
if err != nil {
return 0, fmt.Errorf("du command failed on %s with output stdout: %s, stderr: %s - %v", dir, string(stdoutb), string(stderrb), err)
}
stdout := string(stdoutb)
if souterr != nil {
glog.Errorf("failed to read from stdout for cmd %v - %v", cmd.Args, souterr)
}
usageInKb, err := strconv.ParseUint(strings.Fields(stdout)[0], 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse 'du' output %s - %s", stdout, err)
}
return usageInKb * 1024, nil
}
func getVfsStats(path string) (uint64, uint64, uint64, error) {
func getVfsStats(path string) (total uint64, free uint64, avail uint64, inodes uint64, inodesFree uint64, err error) {
var s syscall.Statfs_t
if err := syscall.Statfs(path, &s); err != nil {
return 0, 0, 0, err
if err = syscall.Statfs(path, &s); err != nil {
return 0, 0, 0, 0, 0, err
}
total := uint64(s.Frsize) * s.Blocks
free := uint64(s.Frsize) * s.Bfree
avail := uint64(s.Frsize) * s.Bavail
return total, free, avail, nil
total = uint64(s.Frsize) * s.Blocks
free = uint64(s.Frsize) * s.Bfree
avail = uint64(s.Frsize) * s.Bavail
inodes = uint64(s.Files)
inodesFree = uint64(s.Ffree)
return total, free, avail, inodes, inodesFree, nil
}
func dockerStatusValue(status [][]string, target string) string {

View File

@ -14,18 +14,35 @@
package fs
import "time"
// DeviceInfo identifies the block device backing a filesystem by its
// device name and major:minor device numbers (the pair used to look the
// device up in the cached partitions map).
type DeviceInfo struct {
	Device string
	Major uint
	Minor uint
}
// FsType labels the kind of filesystem backing an Fs entry.
type FsType string

// String returns the filesystem type as its underlying string value.
func (ft FsType) String() string {
	return string(ft)
}

// Filesystem types known to this package.
const (
	ZFS FsType = "zfs"
	DeviceMapper FsType = "devicemapper"
	VFS FsType = "vfs"
)
type Fs struct {
DeviceInfo
Capacity uint64
Free uint64
Available uint64
DiskStats DiskStats
Type FsType
Capacity uint64
Free uint64
Available uint64
Inodes uint64
InodesFree uint64
DiskStats DiskStats
}
type DiskStats struct {
@ -50,7 +67,7 @@ type FsInfo interface {
GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error)
// Returns number of bytes occupied by 'dir'.
GetDirUsage(dir string) (uint64, error)
GetDirUsage(dir string, timeout time.Duration) (uint64, error)
// Returns the block device info of the filesystem on which 'dir' resides.
GetDirFsDevice(dir string) (*DeviceInfo, error)

View File

@ -23,6 +23,8 @@ type CpuSpec struct {
Limit uint64 `json:"limit"`
MaxLimit uint64 `json:"max_limit"`
Mask string `json:"mask,omitempty"`
Quota uint64 `json:"quota,omitempty"`
Period uint64 `json:"period,omitempty"`
}
type MemorySpec struct {
@ -397,6 +399,9 @@ type FsStats struct {
// The block device name associated with the filesystem.
Device string `json:"device,omitempty"`
// Type of the filesystem.
Type string `json:"type"`
// Number of bytes that can be consumed by the container on this filesystem.
Limit uint64 `json:"capacity"`
@ -410,6 +415,9 @@ type FsStats struct {
// Number of bytes available for non-root user.
Available uint64 `json:"available"`
// Number of available Inodes
InodesFree uint64 `json:"inodes_free"`
// Number of reads completed
// This is the total number of reads completed successfully.
ReadsCompleted uint64 `json:"reads_completed"`

View File

@ -20,6 +20,12 @@ type FsInfo struct {
// Total number of bytes available on the filesystem.
Capacity uint64 `json:"capacity"`
// Type of device.
Type string `json:"type"`
// Total number of inodes available on the filesystem.
Inodes uint64 `json:"inodes"`
}
type Node struct {
@ -117,6 +123,7 @@ type CloudProvider string
const (
GCE CloudProvider = "GCE"
AWS = "AWS"
Azure = "Azure"
Baremetal = "Baremetal"
UnkownProvider = "Unknown"
)
@ -128,6 +135,12 @@ const (
UnknownInstance = "Unknown"
)
type InstanceID string
const (
UnNamedInstance InstanceID = "None"
)
type MachineInfo struct {
// The number of cores in this machine.
NumCores int `json:"num_cores"`
@ -165,6 +178,9 @@ type MachineInfo struct {
// Type of cloud instance (e.g. GCE standard) the machine is.
InstanceType InstanceType `json:"instance_type"`
// ID of cloud instance (e.g. instance-1) given to it by the cloud provider.
InstanceID InstanceID `json:"instance_id"`
}
type VersionInfo struct {

View File

@ -36,6 +36,10 @@ type CpuSpec struct {
// Cpu affinity mask.
// TODO(rjnagal): Add a library to convert mask string to set of cpu bitmask.
Mask string `json:"mask,omitempty"`
// CPUQuota Default is disabled
Quota uint64 `json:"quota,omitempty"`
// Period is the CPU reference time in ns, e.g. the quota is compared against this.
Period uint64 `json:"period,omitempty"`
}
type MemorySpec struct {

View File

@ -30,10 +30,12 @@ func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {
ioDuration := time.Millisecond * time.Duration(stat.IoTime)
weightedDuration := time.Millisecond * time.Duration(stat.WeightedIoTime)
result = append(result, MachineFsStats{
Device: stat.Device,
Capacity: &stat.Limit,
Usage: &stat.Usage,
Available: &stat.Available,
Device: stat.Device,
Type: stat.Type,
Capacity: &stat.Limit,
Usage: &stat.Usage,
Available: &stat.Available,
InodesFree: &stat.InodesFree,
DiskStats: DiskStats{
ReadsCompleted: &stat.ReadsCompleted,
ReadsMerged: &stat.ReadsMerged,

View File

@ -112,6 +112,9 @@ type MachineFsStats struct {
// The block device name associated with the filesystem.
Device string `json:"device"`
// Type of filesystem.
Type string `json:"type"`
// Number of bytes that can be consumed on this filesystem.
Capacity *uint64 `json:"capacity,omitempty"`
@ -121,6 +124,9 @@ type MachineFsStats struct {
// Number of bytes available for non-root user on this filesystem.
Available *uint64 `json:"available,omitempty"`
// Number of inodes that are available on this filesystem.
InodesFree *uint64 `json:"inodes_free,omitempty"`
// DiskStats for this device.
DiskStats `json:"inline"`
}

View File

@ -96,6 +96,7 @@ func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (
realCloudInfo := cloudinfo.NewRealCloudInfo()
cloudProvider := realCloudInfo.GetCloudProvider()
instanceType := realCloudInfo.GetInstanceType()
instanceID := realCloudInfo.GetInstanceID()
machineInfo := &info.MachineInfo{
NumCores: numCores,
@ -109,10 +110,11 @@ func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (
BootID: getInfoFromFiles(filepath.Join(rootFs, *bootIdFilePath)),
CloudProvider: cloudProvider,
InstanceType: instanceType,
InstanceID: instanceID,
}
for _, fs := range filesystems {
machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Capacity: fs.Capacity})
machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Type: fs.Type.String(), Capacity: fs.Capacity, Inodes: fs.Inodes})
}
return machineInfo, nil

View File

@ -48,6 +48,43 @@ var logCadvisorUsage = flag.Bool("log_cadvisor_usage", false, "Whether to log th
var enableLoadReader = flag.Bool("enable_load_reader", false, "Whether to enable cpu load reader")
var eventStorageAgeLimit = flag.String("event_storage_age_limit", "default=24h", "Max length of time for which to store events (per type). Value is a comma separated list of key values, where the keys are event types (e.g.: creation, oom) or \"default\" and the value is a duration. Default is applied to all non-specified event types")
var eventStorageEventLimit = flag.String("event_storage_event_limit", "default=100000", "Max number of events to store (per type). Value is a comma separated list of key values, where the keys are event types (e.g.: creation, oom) or \"default\" and the value is an integer. Default is applied to all non-specified event types")
var applicationMetricsCountLimit = flag.Int("application_metrics_count_limit", 100, "Max number of application metrics to store (per container)")
var (
	// Metrics to be ignored. Populated from the disable_metrics flag;
	// factories consult this set to skip collection of the listed metrics.
	ignoreMetrics metricSetValue = metricSetValue{container.MetricSet{}}
	// List of metrics that can be ignored. Only metric kinds present here
	// are accepted by the disable_metrics flag (see metricSetValue.Set).
	ignoreWhitelist = container.MetricSet{
		container.DiskUsageMetrics: struct{}{},
		container.NetworkUsageMetrics: struct{}{},
		container.NetworkTcpUsageMetrics: struct{}{},
	}
)
// init registers the disable_metrics flag and turns tcp metrics off by
// default (the flag help cites their high CPU usage).
func init() {
	flag.Var(&ignoreMetrics, "disable_metrics", "comma-separated list of metrics to be disabled. Options are `disk`, `network`, `tcp`. Note: tcp is disabled by default due to high CPU usage.")
	// Tcp metrics are ignored by default.
	// NOTE(review): flag.Set's error is discarded; presumably "tcp" is
	// always whitelisted so Set cannot fail here — confirm.
	flag.Set("disable_metrics", "tcp")
}
// metricSetValue adapts container.MetricSet to the flag.Value interface
// so the set can be populated directly from the disable_metrics flag.
type metricSetValue struct {
	container.MetricSet
}

// String renders the current metric set for flag printing/help output.
func (ml *metricSetValue) String() string {
	return fmt.Sprint(*ml)
}
// Set implements flag.Value. It parses a comma-separated list of metric
// names and adds each to the set; any name outside the supported
// whitelist aborts with an error.
func (ml *metricSetValue) Set(value string) error {
	for _, name := range strings.Split(value, ",") {
		kind := container.MetricKind(name)
		if !ignoreWhitelist.Has(kind) {
			return fmt.Errorf("unsupported metric %q specified in disable_metrics", name)
		}
		ml.Add(kind)
	}
	return nil
}
// The Manager interface defines operations for starting a manager and getting
// container and machine information.
@ -146,6 +183,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
inHostNamespace = true
}
newManager := &manager{
containers: make(map[namespacedContainerName]*containerData),
quitChannels: make([]chan error, 0, 2),
@ -156,6 +194,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
startupTime: time.Now(),
maxHousekeepingInterval: maxHousekeepingInterval,
allowDynamicHousekeeping: allowDynamicHousekeeping,
ignoreMetrics: ignoreMetrics.MetricSet,
}
machineInfo, err := getMachineInfo(sysfs, fsInfo, inHostNamespace)
@ -198,18 +237,19 @@ type manager struct {
startupTime time.Time
maxHousekeepingInterval time.Duration
allowDynamicHousekeeping bool
ignoreMetrics container.MetricSet
}
// Start the container manager.
func (self *manager) Start() error {
// Register Docker container factory.
err := docker.Register(self, self.fsInfo)
err := docker.Register(self, self.fsInfo, self.ignoreMetrics)
if err != nil {
glog.Errorf("Docker container factory registration failed: %v.", err)
}
// Register the raw driver.
err = raw.Register(self, self.fsInfo)
err = raw.Register(self, self.fsInfo, self.ignoreMetrics)
if err != nil {
glog.Errorf("Registration of the raw container factory failed: %v", err)
}
@ -718,7 +758,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
glog.V(3).Infof("Got config from %q: %q", v, configFile)
if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") {
newCollector, err := collector.NewPrometheusCollector(k, configFile)
newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit)
if err != nil {
glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
return err
@ -729,7 +769,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
return err
}
} else {
newCollector, err := collector.NewCollector(k, configFile)
newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit)
if err != nil {
glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
return err
@ -784,7 +824,6 @@ func (m *manager) createContainer(containerName string) error {
err = m.registerCollectors(collectorConfigs, cont)
if err != nil {
glog.Infof("failed to register collectors for %q: %v", containerName, err)
return err
}
// Add the container name and all its aliases. The aliases must be within the namespace of the factory.

View File

@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build test
package manager
import (

View File

@ -542,10 +542,16 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), baseLabelValues...)
if container.Spec.HasCpu {
desc = prometheus.NewDesc("container_spec_cpu_period", "CPU period of the container.", baseLabels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Period), baseLabelValues...)
if container.Spec.Cpu.Quota != 0 {
desc = prometheus.NewDesc("container_spec_cpu_quota", "CPU quota of the container.", baseLabels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Quota), baseLabelValues...)
}
desc := prometheus.NewDesc("container_spec_cpu_shares", "CPU share of the container.", baseLabels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Limit), baseLabelValues...)
}
}
if container.Spec.HasMemory {
desc := prometheus.NewDesc("container_spec_memory_limit_bytes", "Memory limit for the container.", baseLabels, nil)
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Limit), baseLabelValues...)

View File

@ -0,0 +1,45 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudinfo
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
info "github.com/google/cadvisor/info/v1"
)
// onAWS reports whether the EC2 metadata service is reachable, which is
// how this package decides it is running on an AWS instance.
func onAWS() bool {
	return ec2metadata.New(session.New(&aws.Config{})).Available()
}
// getAwsMetadata fetches a single EC2 metadata field by name, falling
// back to info.UnknownInstance when the lookup fails.
func getAwsMetadata(name string) string {
	if value, err := ec2metadata.New(session.New(&aws.Config{})).GetMetadata(name); err == nil {
		return value
	}
	return info.UnknownInstance
}
// getAwsInstanceType returns the EC2 instance type read from the
// "instance-type" metadata field (UnknownInstance on lookup failure,
// via getAwsMetadata).
func getAwsInstanceType() info.InstanceType {
	return info.InstanceType(getAwsMetadata("instance-type"))
}
// getAwsInstanceID returns the EC2 instance ID read from the
// "instance-id" metadata field (UnknownInstance on lookup failure,
// via getAwsMetadata).
func getAwsInstanceID() info.InstanceID {
	return info.InstanceID(getAwsMetadata("instance-id"))
}

View File

@ -0,0 +1,48 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudinfo
import (
info "github.com/google/cadvisor/info/v1"
"io/ioutil"
"strings"
)
const (
	// SysVendorFileName exposes the machine vendor via DMI/SMBIOS sysfs.
	SysVendorFileName = "/sys/class/dmi/id/sys_vendor"
	// BiosUUIDFileName exposes the firmware product UUID; it is used as
	// the instance ID on Azure (see getAzureInstanceID).
	BiosUUIDFileName = "/sys/class/dmi/id/product_uuid"
	// MicrosoftCorporation is the sys_vendor value reported by Azure VMs.
	MicrosoftCorporation = "Microsoft Corporation"
)
// onAzure reports whether the DMI sys_vendor file names Microsoft as the
// machine vendor, the heuristic used to detect an Azure VM. An unreadable
// file means "not Azure".
func onAzure() bool {
	if vendor, err := ioutil.ReadFile(SysVendorFileName); err == nil {
		return strings.Contains(string(vendor), MicrosoftCorporation)
	}
	return false
}
//TODO: Implement method.
// getAzureInstanceType is a stub: Azure instance-type detection is not
// implemented yet, so Azure machines always report UnknownInstance.
func getAzureInstanceType() info.InstanceType {
	return info.UnknownInstance
}
// getAzureInstanceID uses the firmware product UUID as the Azure
// instance ID, returning UnNamedInstance when the file cannot be read.
func getAzureInstanceID() info.InstanceID {
	raw, err := ioutil.ReadFile(BiosUUIDFileName)
	if err != nil {
		return info.UnNamedInstance
	}
	// Drop the single trailing newline sysfs appends to the UUID.
	uuid := strings.TrimSuffix(string(raw), "\n")
	return info.InstanceID(uuid)
}

View File

@ -23,19 +23,23 @@ import (
type CloudInfo interface {
GetCloudProvider() info.CloudProvider
GetInstanceType() info.InstanceType
GetInstanceID() info.InstanceID
}
type realCloudInfo struct {
cloudProvider info.CloudProvider
instanceType info.InstanceType
instanceID info.InstanceID
}
// NewRealCloudInfo probes the environment once at construction time and
// returns a CloudInfo whose answers are fixed thereafter.
func NewRealCloudInfo() CloudInfo {
	provider := detectCloudProvider()
	return &realCloudInfo{
		cloudProvider: provider,
		instanceType:  detectInstanceType(provider),
		instanceID:    detectInstanceID(provider),
	}
}
@ -47,12 +51,18 @@ func (self *realCloudInfo) GetInstanceType() info.InstanceType {
return self.instanceType
}
// GetInstanceID returns the provider-assigned instance ID detected when
// this realCloudInfo was constructed.
func (self *realCloudInfo) GetInstanceID() info.InstanceID {
	return self.instanceID
}
func detectCloudProvider() info.CloudProvider {
switch {
case onGCE():
return info.GCE
case onAWS():
return info.AWS
case onAzure():
return info.Azure
case onBaremetal():
return info.Baremetal
}
@ -65,20 +75,26 @@ func detectInstanceType(cloudProvider info.CloudProvider) info.InstanceType {
return getGceInstanceType()
case info.AWS:
return getAwsInstanceType()
case info.Azure:
return getAzureInstanceType()
case info.Baremetal:
return info.NoInstance
}
return info.UnknownInstance
}
//TODO: Implement method.
func onAWS() bool {
return false
}
//TODO: Implement method.
func getAwsInstanceType() info.InstanceType {
return info.UnknownInstance
// detectInstanceID resolves the cloud-assigned instance ID for the given
// provider. Providers without an ID (Baremetal) and unrecognized
// providers both yield UnNamedInstance.
func detectInstanceID(cloudProvider info.CloudProvider) info.InstanceID {
	switch cloudProvider {
	case info.GCE:
		return getGceInstanceID()
	case info.AWS:
		return getAwsInstanceID()
	case info.Azure:
		return getAzureInstanceID()
	}
	return info.UnNamedInstance
}
//TODO: Implement method.

View File

@ -35,3 +35,11 @@ func getGceInstanceType() info.InstanceType {
responseParts := strings.Split(machineType, "/") // Extract the instance name from the machine type.
return info.InstanceType(responseParts[len(responseParts)-1])
}
// getGceInstanceID returns the GCE instance ID ("instance/id") from the
// metadata server, or UnknownInstance when the lookup fails.
func getGceInstanceID() info.InstanceID {
	instanceID, err := metadata.Get("instance/id")
	if err != nil {
		// NOTE(review): the other providers fall back to UnNamedInstance
		// ("None") on failure; returning UnknownInstance ("Unknown") here is
		// inconsistent — confirm whether callers rely on it before changing.
		return info.UnknownInstance
	}
	// A single conversion suffices; the original round-tripped the string
	// through info.InstanceType for no effect.
	return info.InstanceID(instanceID)
}

View File

@ -192,11 +192,6 @@ func validateDockerInfo() (string, string) {
execDriver := info.Get("ExecutionDriver")
storageDriver := info.Get("Driver")
desc := fmt.Sprintf("Docker exec driver is %s. Storage driver is %s.\n", execDriver, storageDriver)
if docker.UseSystemd() {
desc += "\tsystemd is being used to create cgroups.\n"
} else {
desc += "\tCgroups are being created through cgroup filesystem.\n"
}
if strings.Contains(execDriver, "native") {
stateFile := docker.DockerStateDir()
if !utils.FileExists(stateFile) {

View File

@ -1 +1 @@
0.21.1
0.22.0