mirror of https://github.com/k3s-io/k3s

cadvisor bump to 0.16.0.2
parent a7e47cada2
commit d0887aec2b

@@ -261,93 +261,93 @@
   },
   {
     "ImportPath": "github.com/google/cadvisor/api",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/cache/memory",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/collector",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/container",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/events",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/fs",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/healthz",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/http",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/info/v1",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/info/v2",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/manager",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/metrics",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/pages",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/storage",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/summary",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/utils",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/validate",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/cadvisor/version",
-    "Comment": "0.16.0-81-g27fb6d5",
-    "Rev": "27fb6d593c6bffe274718119659815771e79e198"
+    "Comment": "0.16.0.2",
+    "Rev": "cefada41b87c35294533638733c563a349b95f05"
   },
   {
     "ImportPath": "github.com/google/gofuzz",

@@ -19,6 +19,7 @@ import (
   "net/http"
   "path"
   "strconv"
+  "time"

   "github.com/golang/glog"
   info "github.com/google/cadvisor/info/v1"

@@ -449,8 +450,63 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
   }
 }

+func instCpuStats(last, cur *info.ContainerStats) (*v2.CpuInstStats, error) {
+  if last == nil {
+    return nil, nil
+  }
+  if !cur.Timestamp.After(last.Timestamp) {
+    return nil, fmt.Errorf("container stats move backwards in time")
+  }
+  if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) {
+    return nil, fmt.Errorf("different number of cpus")
+  }
+  timeDelta := cur.Timestamp.Sub(last.Timestamp)
+  if timeDelta <= 100*time.Millisecond {
+    return nil, fmt.Errorf("time delta unexpectedly small")
+  }
+  // Nanoseconds to gain precision and avoid having zero seconds if the
+  // difference between the timestamps is just under a second
+  timeDeltaNs := uint64(timeDelta.Nanoseconds())
+  convertToRate := func(lastValue, curValue uint64) (uint64, error) {
+    if curValue < lastValue {
+      return 0, fmt.Errorf("cumulative stats decrease")
+    }
+    valueDelta := curValue - lastValue
+    return (valueDelta * 1e9) / timeDeltaNs, nil
+  }
+  total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total)
+  if err != nil {
+    return nil, err
+  }
+  percpu := make([]uint64, len(last.Cpu.Usage.PerCpu))
+  for i := range percpu {
+    var err error
+    percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i])
+    if err != nil {
+      return nil, err
+    }
+  }
+  user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User)
+  if err != nil {
+    return nil, err
+  }
+  system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System)
+  if err != nil {
+    return nil, err
+  }
+  return &v2.CpuInstStats{
+    Usage: v2.CpuInstUsage{
+      Total:  total,
+      PerCpu: percpu,
+      User:   user,
+      System: system,
+    },
+  }, nil
+}
+
 func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
-  stats := []v2.ContainerStats{}
+  stats := make([]v2.ContainerStats, 0, len(cont.Stats))
+  var last *info.ContainerStats
   for _, val := range cont.Stats {
     stat := v2.ContainerStats{
       Timestamp: val.Timestamp,

@@ -463,6 +519,13 @@ func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
     }
     if stat.HasCpu {
       stat.Cpu = val.Cpu
+      cpuInst, err := instCpuStats(last, val)
+      if err != nil {
+        glog.Warningf("Could not get instant cpu stats: %v", err)
+      } else {
+        stat.CpuInst = cpuInst
+      }
+      last = val
     }
     if stat.HasMemory {
       stat.Memory = val.Memory

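Note: the new CpuInst field reports rates, not cumulative counters. A minimal standalone sketch (not part of the diff) of the conversion instCpuStats performs, reusing the numbers from the "Two seconds elapsed" test case further below:

package main

import "fmt"

func main() {
	// Cumulative CPU time is tracked in nanoseconds, so dividing the counter delta
	// by the elapsed wall time (also in nanoseconds) and scaling by 1e9 yields
	// "nanocores per second". Values mirror the "Two seconds elapsed" test case.
	lastTotal, curTotal := uint64(300), uint64(500)
	timeDeltaNs := uint64(2 * 1e9) // two seconds between samples

	rate := ((curTotal - lastTotal) * 1e9) / timeDeltaNs
	fmt.Println(rate) // 100
}
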
@@ -19,9 +19,11 @@ import (
   "net/http"
   "reflect"
   "testing"
+  "time"

   "github.com/google/cadvisor/events"
   info "github.com/google/cadvisor/info/v1"
+  "github.com/google/cadvisor/info/v2"
   "github.com/stretchr/testify/assert"
 )

@@ -78,3 +80,170 @@ func TestGetEventRequestDoubleArgument(t *testing.T) {
   assert.True(t, stream)
   assert.Nil(t, err)
 }
+
+func TestInstCpuStats(t *testing.T) {
+  tests := []struct {
+    last *info.ContainerStats
+    cur  *info.ContainerStats
+    want *v2.CpuInstStats
+  }{
+    // Last is missing
+    {
+      nil,
+      &info.ContainerStats{},
+      nil,
+    },
+    // Goes back in time
+    {
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0).Add(time.Second),
+      },
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0),
+      },
+      nil,
+    },
+    // Zero time delta
+    {
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0),
+      },
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0),
+      },
+      nil,
+    },
+    // Unexpectedly small time delta
+    {
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0),
+      },
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0).Add(30 * time.Millisecond),
+      },
+      nil,
+    },
+    // Different number of cpus
+    {
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0),
+        Cpu: info.CpuStats{
+          Usage: info.CpuUsage{
+            PerCpu: []uint64{100, 200},
+          },
+        },
+      },
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0).Add(time.Second),
+        Cpu: info.CpuStats{
+          Usage: info.CpuUsage{
+            PerCpu: []uint64{100, 200, 300},
+          },
+        },
+      },
+      nil,
+    },
+    // Stat numbers decrease
+    {
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0),
+        Cpu: info.CpuStats{
+          Usage: info.CpuUsage{
+            Total:  300,
+            PerCpu: []uint64{100, 200},
+            User:   250,
+            System: 50,
+          },
+        },
+      },
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0).Add(time.Second),
+        Cpu: info.CpuStats{
+          Usage: info.CpuUsage{
+            Total:  200,
+            PerCpu: []uint64{100, 100},
+            User:   150,
+            System: 50,
+          },
+        },
+      },
+      nil,
+    },
+    // One second elapsed
+    {
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0),
+        Cpu: info.CpuStats{
+          Usage: info.CpuUsage{
+            Total:  300,
+            PerCpu: []uint64{100, 200},
+            User:   250,
+            System: 50,
+          },
+        },
+      },
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0).Add(time.Second),
+        Cpu: info.CpuStats{
+          Usage: info.CpuUsage{
+            Total:  500,
+            PerCpu: []uint64{200, 300},
+            User:   400,
+            System: 100,
+          },
+        },
+      },
+      &v2.CpuInstStats{
+        Usage: v2.CpuInstUsage{
+          Total:  200,
+          PerCpu: []uint64{100, 100},
+          User:   150,
+          System: 50,
+        },
+      },
+    },
+    // Two seconds elapsed
+    {
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0),
+        Cpu: info.CpuStats{
+          Usage: info.CpuUsage{
+            Total:  300,
+            PerCpu: []uint64{100, 200},
+            User:   250,
+            System: 50,
+          },
+        },
+      },
+      &info.ContainerStats{
+        Timestamp: time.Unix(100, 0).Add(2 * time.Second),
+        Cpu: info.CpuStats{
+          Usage: info.CpuUsage{
+            Total:  500,
+            PerCpu: []uint64{200, 300},
+            User:   400,
+            System: 100,
+          },
+        },
+      },
+      &v2.CpuInstStats{
+        Usage: v2.CpuInstUsage{
+          Total:  100,
+          PerCpu: []uint64{50, 50},
+          User:   75,
+          System: 25,
+        },
+      },
+    },
+  }
+  for _, c := range tests {
+    got, err := instCpuStats(c.last, c.cur)
+    if err != nil {
+      if c.want == nil {
+        continue
+      }
+      t.Errorf("Unexpected error: %v", err)
+    }
+    assert.Equal(t, c.want, got)
+  }
+}

@@ -96,7 +96,7 @@ func (self *dockerFactory) String() string {
   return DockerNamespace
 }

-func (self *dockerFactory) NewContainerHandler(name string) (handler container.ContainerHandler, err error) {
+func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
   client, err := docker.NewClient(*ArgDockerEndpoint)
   if err != nil {
     return

@@ -108,6 +108,7 @@ func (self *dockerFactory) NewContainerHandler(name string) (handler container.C
     self.fsInfo,
     self.usesAufsDriver,
     &self.cgroupSubsystems,
+    inHostNamespace,
   )
   return
 }

@@ -71,6 +71,18 @@ type dockerContainerHandler struct {
   // Metadata labels associated with the container.
   labels map[string]string

+  // The container PID used to switch namespaces as required
+  pid int
+
+  // Image name used for this container.
+  image string
+
+  // The host root FS to read
+  rootFs string
+
+  // The network mode of the container
+  networkMode string
 }

 func newDockerContainerHandler(

@@ -80,6 +92,7 @@ func newDockerContainerHandler(
   fsInfo fs.FsInfo,
   usesAufsDriver bool,
   cgroupSubsystems *containerLibcontainer.CgroupSubsystems,
+  inHostNamespace bool,
 ) (container.ContainerHandler, error) {
   // Create the cgroup paths.
   cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))

@@ -95,6 +108,11 @@ func newDockerContainerHandler(
     Paths: cgroupPaths,
   }

+  rootFs := "/"
+  if !inHostNamespace {
+    rootFs = "/rootfs"
+  }
+
   id := ContainerNameToDockerId(name)
   handler := &dockerContainerHandler{
     id: id,

@@ -105,6 +123,7 @@ func newDockerContainerHandler(
     cgroupManager:  cgroupManager,
     usesAufsDriver: usesAufsDriver,
     fsInfo:         fsInfo,
+    rootFs:         rootFs,
   }
   handler.storageDirs = append(handler.storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id))

@@ -114,11 +133,14 @@ func newDockerContainerHandler(
     return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
   }
   handler.creationTime = ctnr.Created
+  handler.pid = ctnr.State.Pid

   // Add the name and bare ID as aliases of the container.
   handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"))
   handler.aliases = append(handler.aliases, id)
   handler.labels = ctnr.Config.Labels
+  handler.image = ctnr.Config.Image
+  handler.networkMode = ctnr.HostConfig.NetworkMode

   return handler, nil
 }

@@ -167,21 +189,23 @@ func libcontainerConfigToContainerSpec(config *libcontainerConfigs.Config, mi *i
   }
   spec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.CpusetCpus, mi.NumCores)

-  // Docker reports a loop device for containers with --net=host. Ignore
-  // those too.
-  networkCount := 0
-  for _, n := range config.Networks {
-    if n.Type != "loopback" {
-      networkCount += 1
-    }
-  }
-
-  spec.HasNetwork = networkCount > 0
   spec.HasDiskIo = true

   return spec
 }

+var (
+  hasNetworkModes = map[string]bool{
+    "host":    true,
+    "bridge":  true,
+    "default": true,
+  }
+)
+
+func hasNet(networkMode string) bool {
+  return hasNetworkModes[networkMode]
+}
+
 func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
   mi, err := self.machineInfoFactory.GetMachineInfo()
   if err != nil {

@@ -198,6 +222,8 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
     spec.HasFilesystem = true
   }
   spec.Labels = self.labels
+  spec.Image = self.image
+  spec.HasNetwork = hasNet(self.networkMode)

   return spec, err
 }

@@ -247,32 +273,16 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error

 // TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.
 func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
-  config, err := self.readLibcontainerConfig()
-  if err != nil {
-    return nil, err
-  }
-
-  var networkInterfaces []string
-  if len(config.Networks) > 0 {
-    // ContainerStats only reports stat for one network device.
-    // TODO(vmarmol): Handle multiple physical network devices.
-    for _, n := range config.Networks {
-      // Take the first non-loopback.
-      if n.Type != "loopback" {
-        networkInterfaces = []string{n.HostInterfaceName}
-        break
-      }
-    }
-  }
-  stats, err := containerLibcontainer.GetStats(self.cgroupManager, networkInterfaces)
+  stats, err := containerLibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid)
   if err != nil {
     return stats, err
   }
-
-  // TODO(rjnagal): Remove the conversion when network stats are read from libcontainer.
-  convertInterfaceStats(&stats.Network.InterfaceStats)
-  for i := range stats.Network.Interfaces {
-    convertInterfaceStats(&stats.Network.Interfaces[i])
+  // Clean up stats for containers that don't have their own network - this
+  // includes containers running in Kubernetes pods that use the network of the
+  // infrastructure container. This stops metrics being reported multiple times
+  // for each container in a pod.
+  if !hasNet(self.networkMode) {
+    stats.Network = info.NetworkStats{}
   }

   // Get filesystem stats.

@@ -284,21 +294,6 @@ func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
   return stats, nil
 }

-func convertInterfaceStats(stats *info.InterfaceStats) {
-  net := *stats
-
-  // Ingress for host veth is from the container.
-  // Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
-  stats.RxBytes = net.TxBytes
-  stats.RxPackets = net.TxPackets
-  stats.RxErrors = net.TxErrors
-  stats.RxDropped = net.TxDropped
-  stats.TxBytes = net.RxBytes
-  stats.TxPackets = net.RxPackets
-  stats.TxErrors = net.RxErrors
-  stats.TxDropped = net.RxDropped
-}
-
 func (self *dockerContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
   if self.name != "/docker" {
     return []info.ContainerReference{}, nil

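Note: the new hasNet check means containers that borrow another container's network namespace report no network stats of their own. A small illustrative sketch (not part of the diff; the sample NetworkMode strings, such as "container:abc123", are assumptions about what "docker inspect" can report):

package main

import "fmt"

// Mirror of the hasNet logic added to the Docker handler: only these network
// modes mean the container owns its own network counters.
var hasNetworkModes = map[string]bool{
	"host":    true,
	"bridge":  true,
	"default": true,
}

func hasNet(networkMode string) bool {
	return hasNetworkModes[networkMode]
}

func main() {
	for _, mode := range []string{"bridge", "host", "container:abc123", "none"} {
		fmt.Printf("%-18s ownsNetworkStats=%v\n", mode, hasNet(mode))
	}
}
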
@@ -23,7 +23,7 @@ import (

 type ContainerHandlerFactory interface {
   // Create a new ContainerHandler using this factory. CanHandleAndAccept() must have returned true.
-  NewContainerHandler(name string) (c ContainerHandler, err error)
+  NewContainerHandler(name string, inHostNamespace bool) (c ContainerHandler, err error)

   // Returns whether this factory can handle and accept the specified container.
   CanHandleAndAccept(name string) (handle bool, accept bool, err error)

@@ -60,7 +60,7 @@ func HasFactories() bool {
 }

 // Create a new ContainerHandler for the specified container.
-func NewContainerHandler(name string) (ContainerHandler, bool, error) {
+func NewContainerHandler(name string, inHostNamespace bool) (ContainerHandler, bool, error) {
   factoriesLock.RLock()
   defer factoriesLock.RUnlock()

@@ -76,7 +76,7 @@ func NewContainerHandler(name string) (ContainerHandler, bool, error) {
       return nil, false, nil
     }
     glog.V(3).Infof("Using factory %q for container %q", factory, name)
-    handle, err := factory.NewContainerHandler(name)
+    handle, err := factory.NewContainerHandler(name, inHostNamespace)
     return handle, canAccept, err
   } else {
     glog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)

@@ -39,7 +39,7 @@ func (self *mockContainerHandlerFactory) CanHandleAndAccept(name string) (bool,
   return self.CanHandleValue, self.CanAcceptValue, nil
 }

-func (self *mockContainerHandlerFactory) NewContainerHandler(name string) (ContainerHandler, error) {
+func (self *mockContainerHandlerFactory) NewContainerHandler(name string, isHostNamespace bool) (ContainerHandler, error) {
   args := self.Called(name)
   return args.Get(0).(ContainerHandler), args.Error(1)
 }

@@ -60,13 +60,13 @@ func TestNewContainerHandler_FirstMatches(t *testing.T) {
   RegisterContainerHandlerFactory(allwaysYes)

   // The yes factory should be asked to create the ContainerHandler.
-  mockContainer, err := mockFactory.NewContainerHandler(testContainerName)
+  mockContainer, err := mockFactory.NewContainerHandler(testContainerName, true)
   if err != nil {
     t.Error(err)
   }
   allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil)

-  cont, _, err := NewContainerHandler(testContainerName)
+  cont, _, err := NewContainerHandler(testContainerName, true)
   if err != nil {
     t.Error(err)
   }

@@ -93,13 +93,13 @@ func TestNewContainerHandler_SecondMatches(t *testing.T) {
   RegisterContainerHandlerFactory(allwaysYes)

   // The yes factory should be asked to create the ContainerHandler.
-  mockContainer, err := mockFactory.NewContainerHandler(testContainerName)
+  mockContainer, err := mockFactory.NewContainerHandler(testContainerName, true)
   if err != nil {
     t.Error(err)
   }
   allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil)

-  cont, _, err := NewContainerHandler(testContainerName)
+  cont, _, err := NewContainerHandler(testContainerName, true)
   if err != nil {
     t.Error(err)
   }

@@ -125,7 +125,7 @@ func TestNewContainerHandler_NoneMatch(t *testing.T) {
   }
   RegisterContainerHandlerFactory(allwaysNo2)

-  _, _, err := NewContainerHandler(testContainerName)
+  _, _, err := NewContainerHandler(testContainerName, true)
   if err == nil {
     t.Error("Expected NewContainerHandler to fail")
   }

@@ -148,7 +148,7 @@ func TestNewContainerHandler_Accept(t *testing.T) {
   }
   RegisterContainerHandlerFactory(cannotAccept)

-  _, accept, err := NewContainerHandler(testContainerName)
+  _, accept, err := NewContainerHandler(testContainerName, true)
   if err != nil {
     t.Error("Expected NewContainerHandler to succeed")
   }

Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go (generated, vendored; 90 lines changed)
@@ -15,14 +15,19 @@
 package libcontainer

 import (
+  "bufio"
   "fmt"
+  "io/ioutil"
+  "path"
+  "regexp"
+  "strconv"
+  "strings"
   "time"

   "github.com/docker/libcontainer"
   "github.com/docker/libcontainer/cgroups"
   "github.com/golang/glog"
   info "github.com/google/cadvisor/info/v1"
-  "github.com/google/cadvisor/utils/sysinfo"
 )

 type CgroupSubsystems struct {

@@ -74,7 +79,7 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{
 }

 // Get cgroup and networking stats of the specified container
-func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info.ContainerStats, error) {
+func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int) (*info.ContainerStats, error) {
   cgroupStats, err := cgroupManager.GetStats()
   if err != nil {
     return nil, err

@@ -84,23 +89,90 @@ func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info.
   }
   stats := toContainerStats(libcontainerStats)

-  // TODO(rjnagal): Use networking stats directly from libcontainer.
-  stats.Network.Interfaces = make([]info.InterfaceStats, len(networkInterfaces))
-  for i := range networkInterfaces {
-    interfaceStats, err := sysinfo.GetNetworkStats(networkInterfaces[i])
-    if err != nil {
-      return stats, err
-    }
-    stats.Network.Interfaces[i] = interfaceStats
-  }
+  // If we know the pid then get network stats from /proc/<pid>/net/dev
+  if pid > 0 {
+    netStats, err := networkStatsFromProc(rootFs, pid)
+    if err != nil {
+      glog.V(2).Infof("Unable to get network stats from pid %d: %v", pid, err)
+    } else {
+      stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
+    }
+  }

   // For backwards compatibility.
-  if len(networkInterfaces) > 0 {
+  if len(stats.Network.Interfaces) > 0 {
     stats.Network.InterfaceStats = stats.Network.Interfaces[0]
   }

   return stats, nil
 }

+func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error) {
+  netStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), "/net/dev")
+
+  ifaceStats, err := scanInterfaceStats(netStatsFile)
+  if err != nil {
+    return []info.InterfaceStats{}, fmt.Errorf("couldn't read network stats: %v", err)
+  }
+
+  return ifaceStats, nil
+}
+
+var (
+  ignoredDevicePrefixes = []string{"lo", "veth", "docker"}
+  netStatLineRE         = regexp.MustCompile("[ ]*(.+):([ ]+[0-9]+){16}")
+)
+
+func isIgnoredDevice(ifName string) bool {
+  for _, prefix := range ignoredDevicePrefixes {
+    if strings.HasPrefix(strings.ToLower(ifName), prefix) {
+      return true
+    }
+  }
+  return false
+}
+
+func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) {
+  var (
+    bkt uint64
+  )
+
+  stats := []info.InterfaceStats{}
+
+  data, err := ioutil.ReadFile(netStatsFile)
+  if err != nil {
+    return stats, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
+  }
+
+  reader := strings.NewReader(string(data))
+  scanner := bufio.NewScanner(reader)
+
+  scanner.Split(bufio.ScanLines)
+
+  for scanner.Scan() {
+    line := scanner.Text()
+    if netStatLineRE.MatchString(line) {
+      line = strings.Replace(line, ":", "", -1)
+
+      i := info.InterfaceStats{}
+
+      _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d",
+        &i.Name, &i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped, &bkt, &bkt, &bkt,
+        &bkt, &i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped, &bkt, &bkt, &bkt, &bkt)
+
+      if err != nil {
+        return stats, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
+      }
+
+      if !isIgnoredDevice(i.Name) {
+        stats = append(stats, i)
+      }
+    }
+  }
+
+  return stats, nil
+}
+
 func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
   pids, err := cgroupManager.GetPids()
   if err != nil {

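Note: scanInterfaceStats reads the standard /proc/<pid>/net/dev table, where each interface row has 16 numeric columns; the first four are the receive bytes/packets/errs/drop counters and the ninth through twelfth are the transmit counterparts. A minimal sketch of that column mapping with a made-up sample row (the values are illustrative, not from the diff):

package main

import "fmt"

func main() {
	// Hypothetical /proc/<pid>/net/dev data row with the ":" already stripped,
	// as scanInterfaceStats does before scanning.
	line := "eth0 14 15 16 17 0 0 0 0 18 19 20 21 0 0 0 0"

	var name string
	var rxBytes, rxPackets, rxErrs, rxDrop uint64
	var txBytes, txPackets, txErrs, txDrop, bkt uint64

	// Same column layout as the Sscanf call in scanInterfaceStats: columns 1-4 are
	// receive counters, columns 9-12 are transmit counters, the rest are discarded.
	fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d",
		&name, &rxBytes, &rxPackets, &rxErrs, &rxDrop, &bkt, &bkt, &bkt, &bkt,
		&txBytes, &txPackets, &txErrs, &txDrop, &bkt, &bkt, &bkt, &bkt)
	fmt.Println(name, rxBytes, txBytes) // eth0 14 18
}
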
@@ -109,7 +109,7 @@ func (self *FactoryForMockContainerHandler) String() string {
   return self.Name
 }

-func (self *FactoryForMockContainerHandler) NewContainerHandler(name string) (ContainerHandler, error) {
+func (self *FactoryForMockContainerHandler) NewContainerHandler(name string, inHostNamespace bool) (ContainerHandler, error) {
   handler := &MockContainerHandler{}
   if self.PrepareContainerHandlerFunc != nil {
     self.PrepareContainerHandlerFunc(name, handler)

@@ -45,8 +45,12 @@ func (self *rawFactory) String() string {
   return "raw"
 }

-func (self *rawFactory) NewContainerHandler(name string) (container.ContainerHandler, error) {
-  return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher)
+func (self *rawFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
+  rootFs := "/"
+  if !inHostNamespace {
+    rootFs = "/rootfs"
+  }
+  return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs)
 }

 // The raw factory can handle any container. If --docker_only is set to false, non-docker containers are ignored.

@@ -61,9 +61,11 @@ type rawContainerHandler struct {

   fsInfo         fs.FsInfo
   externalMounts []mount
+
+  rootFs string
 }

-func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *InotifyWatcher) (container.ContainerHandler, error) {
+func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *InotifyWatcher, rootFs string) (container.ContainerHandler, error) {
   // Create the cgroup paths.
   cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
   for key, val := range cgroupSubsystems.MountPoints {

@@ -108,6 +110,7 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
     hasNetwork:     hasNetwork,
     externalMounts: externalMounts,
     watcher:        watcher,
+    rootFs:         rootFs,
   }, nil
 }

@@ -326,15 +329,7 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
 }

 func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
-  nd, err := self.GetRootNetworkDevices()
-  if err != nil {
-    return new(info.ContainerStats), err
-  }
-  networkInterfaces := make([]string, len(nd))
-  for i := range nd {
-    networkInterfaces[i] = nd[i].Name
-  }
-  stats, err := libcontainer.GetStats(self.cgroupManager, networkInterfaces)
+  stats, err := libcontainer.GetStats(self.cgroupManager, self.rootFs, os.Getpid())
   if err != nil {
     return stats, err
   }

@@ -61,6 +61,9 @@ type ContainerSpec struct {

   HasCustomMetrics bool         `json:"has_custom_metrics"`
   CustomMetrics    []MetricSpec `json:"custom_metrics,omitempty"`
+
+  // Image name used for this container.
+  Image string `json:"image,omitempty"`
 }

 // Container reference contains enough information to uniquely identify a container

@@ -80,14 +80,20 @@ type ContainerSpec struct {
   HasNetwork    bool `json:"has_network"`
   HasFilesystem bool `json:"has_filesystem"`
   HasDiskIo     bool `json:"has_diskio"`
+
+  // Image name used for this container.
+  Image string `json:"image,omitempty"`
 }

 type ContainerStats struct {
   // The time of this stat point.
   Timestamp time.Time `json:"timestamp"`
   // CPU statistics
-  HasCpu bool        `json:"has_cpu"`
-  Cpu    v1.CpuStats `json:"cpu,omitempty"`
+  HasCpu bool `json:"has_cpu"`
+  // In nanoseconds (aggregated)
+  Cpu v1.CpuStats `json:"cpu,omitempty"`
+  // In nanocores per second (instantaneous)
+  CpuInst *CpuInstStats `json:"cpu_inst,omitempty"`
   // Disk IO statistics
   HasDiskIo bool           `json:"has_diskio"`
   DiskIo    v1.DiskIoStats `json:"diskio,omitempty"`

@@ -204,3 +210,27 @@ type NetworkStats struct {
   // Network stats by interface.
   Interfaces []v1.InterfaceStats `json:"interfaces,omitempty"`
 }
+
+// Instantaneous CPU stats
+type CpuInstStats struct {
+  Usage CpuInstUsage `json:"usage"`
+}
+
+// CPU usage time statistics.
+type CpuInstUsage struct {
+  // Total CPU usage.
+  // Units: nanocores per second
+  Total uint64 `json:"total"`
+
+  // Per CPU/core usage of the container.
+  // Unit: nanocores per second
+  PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
+
+  // Time spent in user space.
+  // Unit: nanocores per second
+  User uint64 `json:"user"`
+
+  // Time spent in kernel space.
+  // Unit: nanocores per second
+  System uint64 `json:"system"`
+}

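Note: the json tags above determine the v2 API wire format for the new instantaneous CPU block. A minimal sketch (local copies of the types, illustrative values only) of what a marshalled CpuInstStats looks like:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies with the same json tags as the diff, used only to show the shape.
type CpuInstUsage struct {
	Total  uint64   `json:"total"`
	PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
	User   uint64   `json:"user"`
	System uint64   `json:"system"`
}

type CpuInstStats struct {
	Usage CpuInstUsage `json:"usage"`
}

func main() {
	b, _ := json.Marshal(CpuInstStats{Usage: CpuInstUsage{Total: 200, PerCpu: []uint64{100, 100}, User: 150, System: 50}})
	fmt.Println(string(b))
	// {"usage":{"total":200,"per_cpu_usage":[100,100],"user":150,"system":50}}
}
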
@@ -380,6 +380,7 @@ func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
     HasNetwork:       specV1.HasNetwork,
     HasDiskIo:        specV1.HasDiskIo,
     HasCustomMetrics: specV1.HasCustomMetrics,
+    Image:            specV1.Image,
   }
   if specV1.HasCpu {
     specV2.Cpu.Limit = specV1.Cpu.Limit

@@ -736,7 +737,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c

 // Create a container.
 func (m *manager) createContainer(containerName string) error {
-  handler, accept, err := container.NewContainerHandler(containerName)
+  handler, accept, err := container.NewContainerHandler(containerName, m.inHostNamespace)
   if err != nil {
     return err
   }

@@ -61,7 +61,7 @@ type containerMetric struct {
 }

 func (cm *containerMetric) desc() *prometheus.Desc {
-  return prometheus.NewDesc(cm.name, cm.help, append([]string{"name", "id"}, cm.extraLabels...), nil)
+  return prometheus.NewDesc(cm.name, cm.help, append([]string{"name", "id", "image"}, cm.extraLabels...), nil)
 }

 // PrometheusCollector implements prometheus.Collector.

@@ -287,60 +287,124 @@ func NewPrometheusCollector(infoProvider subcontainersInfoProvider) *PrometheusC
       })
     },
   }, {
-    name:      "container_network_receive_bytes_total",
-    help:      "Cumulative count of bytes received",
-    valueType: prometheus.CounterValue,
+    name:        "container_network_receive_bytes_total",
+    help:        "Cumulative count of bytes received",
+    valueType:   prometheus.CounterValue,
+    extraLabels: []string{"interface"},
     getValues: func(s *info.ContainerStats) metricValues {
-      return metricValues{{value: float64(s.Network.RxBytes)}}
+      values := make(metricValues, 0, len(s.Network.Interfaces))
+      for _, value := range s.Network.Interfaces {
+        values = append(values, metricValue{
+          value:  float64(value.RxBytes),
+          labels: []string{value.Name},
+        })
+      }
+      return values
     },
   }, {
-    name:      "container_network_receive_packets_total",
-    help:      "Cumulative count of packets received",
-    valueType: prometheus.CounterValue,
+    name:        "container_network_receive_packets_total",
+    help:        "Cumulative count of packets received",
+    valueType:   prometheus.CounterValue,
+    extraLabels: []string{"interface"},
     getValues: func(s *info.ContainerStats) metricValues {
-      return metricValues{{value: float64(s.Network.RxPackets)}}
+      values := make(metricValues, 0, len(s.Network.Interfaces))
+      for _, value := range s.Network.Interfaces {
+        values = append(values, metricValue{
+          value:  float64(value.RxPackets),
+          labels: []string{value.Name},
+        })
+      }
+      return values
     },
   }, {
-    name:      "container_network_receive_packets_dropped_total",
-    help:      "Cumulative count of packets dropped while receiving",
-    valueType: prometheus.CounterValue,
+    name:        "container_network_receive_packets_dropped_total",
+    help:        "Cumulative count of packets dropped while receiving",
+    valueType:   prometheus.CounterValue,
+    extraLabels: []string{"interface"},
     getValues: func(s *info.ContainerStats) metricValues {
-      return metricValues{{value: float64(s.Network.RxDropped)}}
+      values := make(metricValues, 0, len(s.Network.Interfaces))
+      for _, value := range s.Network.Interfaces {
+        values = append(values, metricValue{
+          value:  float64(value.RxDropped),
+          labels: []string{value.Name},
+        })
+      }
+      return values
     },
   }, {
-    name:      "container_network_receive_errors_total",
-    help:      "Cumulative count of errors encountered while receiving",
-    valueType: prometheus.CounterValue,
+    name:        "container_network_receive_errors_total",
+    help:        "Cumulative count of errors encountered while receiving",
+    valueType:   prometheus.CounterValue,
+    extraLabels: []string{"interface"},
     getValues: func(s *info.ContainerStats) metricValues {
-      return metricValues{{value: float64(s.Network.RxErrors)}}
+      values := make(metricValues, 0, len(s.Network.Interfaces))
+      for _, value := range s.Network.Interfaces {
+        values = append(values, metricValue{
+          value:  float64(value.RxErrors),
+          labels: []string{value.Name},
+        })
+      }
+      return values
     },
   }, {
-    name:      "container_network_transmit_bytes_total",
-    help:      "Cumulative count of bytes transmitted",
-    valueType: prometheus.CounterValue,
+    name:        "container_network_transmit_bytes_total",
+    help:        "Cumulative count of bytes transmitted",
+    valueType:   prometheus.CounterValue,
+    extraLabels: []string{"interface"},
    getValues: func(s *info.ContainerStats) metricValues {
-      return metricValues{{value: float64(s.Network.TxBytes)}}
+      values := make(metricValues, 0, len(s.Network.Interfaces))
+      for _, value := range s.Network.Interfaces {
+        values = append(values, metricValue{
+          value:  float64(value.TxBytes),
+          labels: []string{value.Name},
+        })
+      }
+      return values
     },
   }, {
-    name:      "container_network_transmit_packets_total",
-    help:      "Cumulative count of packets transmitted",
-    valueType: prometheus.CounterValue,
+    name:        "container_network_transmit_packets_total",
+    help:        "Cumulative count of packets transmitted",
+    valueType:   prometheus.CounterValue,
+    extraLabels: []string{"interface"},
     getValues: func(s *info.ContainerStats) metricValues {
-      return metricValues{{value: float64(s.Network.TxPackets)}}
+      values := make(metricValues, 0, len(s.Network.Interfaces))
+      for _, value := range s.Network.Interfaces {
+        values = append(values, metricValue{
+          value:  float64(value.TxPackets),
+          labels: []string{value.Name},
+        })
+      }
+      return values
     },
   }, {
-    name:      "container_network_transmit_packets_dropped_total",
-    help:      "Cumulative count of packets dropped while transmitting",
-    valueType: prometheus.CounterValue,
+    name:        "container_network_transmit_packets_dropped_total",
+    help:        "Cumulative count of packets dropped while transmitting",
+    valueType:   prometheus.CounterValue,
+    extraLabels: []string{"interface"},
     getValues: func(s *info.ContainerStats) metricValues {
-      return metricValues{{value: float64(s.Network.TxDropped)}}
+      values := make(metricValues, 0, len(s.Network.Interfaces))
+      for _, value := range s.Network.Interfaces {
+        values = append(values, metricValue{
+          value:  float64(value.TxDropped),
+          labels: []string{value.Name},
+        })
+      }
+      return values
     },
   }, {
-    name:      "container_network_transmit_errors_total",
-    help:      "Cumulative count of errors encountered while transmitting",
-    valueType: prometheus.CounterValue,
+    name:        "container_network_transmit_errors_total",
+    help:        "Cumulative count of errors encountered while transmitting",
+    valueType:   prometheus.CounterValue,
+    extraLabels: []string{"interface"},
     getValues: func(s *info.ContainerStats) metricValues {
-      return metricValues{{value: float64(s.Network.TxErrors)}}
+      values := make(metricValues, 0, len(s.Network.Interfaces))
+      for _, value := range s.Network.Interfaces {
+        values = append(values, metricValue{
+          value:  float64(value.TxErrors),
+          labels: []string{value.Name},
+        })
+      }
+      return values
     },
   }, {
     name: "container_tasks_state",

@@ -401,12 +465,13 @@ func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
     if len(container.Aliases) > 0 {
       name = container.Aliases[0]
     }
+    image := container.Spec.Image
     stats := container.Stats[0]

     for _, cm := range c.containerMetrics {
       desc := cm.desc()
       for _, metricValue := range cm.getValues(stats) {
-        ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append([]string{name, id}, metricValue.labels...)...)
+        ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append([]string{name, id, image}, metricValue.labels...)...)
       }
     }
   }

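Note: with the "image" base label and the per-interface network series, a scrape of the test provider below would produce lines shaped roughly like the following (illustrative only, not taken from the commit; the removed testdata file further down shows the old shape, and label order assumes Prometheus' alphabetical rendering):

container_network_receive_bytes_total{id="testcontainer",image="test",interface="eth0",name="testcontainer"} 14
container_cpu_usage_seconds_total{cpu="cpu00",id="testcontainer",image="test",name="testcontainer"} 2e-09
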
@@ -34,6 +34,9 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container
       ContainerReference: info.ContainerReference{
         Name: "testcontainer",
       },
+      Spec: info.ContainerSpec{
+        Image: "test",
+      },
       Stats: []*info.ContainerStats{
         {
           Cpu: info.CpuStats{

@@ -68,6 +71,19 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container
             TxErrors:  20,
             TxDropped: 21,
           },
+          Interfaces: []info.InterfaceStats{
+            {
+              Name:      "eth0",
+              RxBytes:   14,
+              RxPackets: 15,
+              RxErrors:  16,
+              RxDropped: 17,
+              TxBytes:   18,
+              TxPackets: 19,
+              TxErrors:  20,
+              TxDropped: 21,
+            },
+          },
         },
         Filesystem: []info.FsStats{
           {

Godeps/_workspace/src/github.com/google/cadvisor/metrics/testdata/prometheus_metrics (generated, vendored; 155 lines)

@@ -1,155 +0,0 @@
-# HELP container_cpu_system_seconds_total Cumulative system cpu time consumed in seconds.
-# TYPE container_cpu_system_seconds_total counter
-container_cpu_system_seconds_total{id="testcontainer",name="testcontainer"} 7e-09
-# HELP container_cpu_usage_seconds_total Cumulative cpu time consumed per cpu in seconds.
-# TYPE container_cpu_usage_seconds_total counter
-container_cpu_usage_seconds_total{cpu="cpu00",id="testcontainer",name="testcontainer"} 2e-09
-container_cpu_usage_seconds_total{cpu="cpu01",id="testcontainer",name="testcontainer"} 3e-09
-container_cpu_usage_seconds_total{cpu="cpu02",id="testcontainer",name="testcontainer"} 4e-09
-container_cpu_usage_seconds_total{cpu="cpu03",id="testcontainer",name="testcontainer"} 5e-09
-# HELP container_cpu_user_seconds_total Cumulative user cpu time consumed in seconds.
-# TYPE container_cpu_user_seconds_total counter
-container_cpu_user_seconds_total{id="testcontainer",name="testcontainer"} 6e-09
-# HELP container_fs_io_current Number of I/Os currently in progress
-# TYPE container_fs_io_current gauge
-container_fs_io_current{device="sda1",id="testcontainer",name="testcontainer"} 42
-container_fs_io_current{device="sda2",id="testcontainer",name="testcontainer"} 47
-# HELP container_fs_io_time_seconds_total Cumulative count of seconds spent doing I/Os
-# TYPE container_fs_io_time_seconds_total counter
-container_fs_io_time_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.3e-08
-container_fs_io_time_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.8e-08
-# HELP container_fs_io_time_weighted_seconds_total Cumulative weighted I/O time in seconds
-# TYPE container_fs_io_time_weighted_seconds_total counter
-container_fs_io_time_weighted_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.4e-08
-container_fs_io_time_weighted_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.9e-08
-# HELP container_fs_limit_bytes Number of bytes that can be consumed by the container on this filesystem.
-# TYPE container_fs_limit_bytes gauge
-container_fs_limit_bytes{device="sda1",id="testcontainer",name="testcontainer"} 22
-container_fs_limit_bytes{device="sda2",id="testcontainer",name="testcontainer"} 37
-# HELP container_fs_read_seconds_total Cumulative count of seconds spent reading
-# TYPE container_fs_read_seconds_total counter
-container_fs_read_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 2.7e-08
-container_fs_read_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.2e-08
-# HELP container_fs_reads_merged_total Cumulative count of reads merged
-# TYPE container_fs_reads_merged_total counter
-container_fs_reads_merged_total{device="sda1",id="testcontainer",name="testcontainer"} 25
-container_fs_reads_merged_total{device="sda2",id="testcontainer",name="testcontainer"} 40
-# HELP container_fs_reads_total Cumulative count of reads completed
-# TYPE container_fs_reads_total counter
-container_fs_reads_total{device="sda1",id="testcontainer",name="testcontainer"} 24
-container_fs_reads_total{device="sda2",id="testcontainer",name="testcontainer"} 39
-# HELP container_fs_sector_reads_total Cumulative count of sector reads completed
-# TYPE container_fs_sector_reads_total counter
-container_fs_sector_reads_total{device="sda1",id="testcontainer",name="testcontainer"} 26
-container_fs_sector_reads_total{device="sda2",id="testcontainer",name="testcontainer"} 41
-# HELP container_fs_sector_writes_total Cumulative count of sector writes completed
-# TYPE container_fs_sector_writes_total counter
-container_fs_sector_writes_total{device="sda1",id="testcontainer",name="testcontainer"} 40
-container_fs_sector_writes_total{device="sda2",id="testcontainer",name="testcontainer"} 45
-# HELP container_fs_usage_bytes Number of bytes that are consumed by the container on this filesystem.
-# TYPE container_fs_usage_bytes gauge
-container_fs_usage_bytes{device="sda1",id="testcontainer",name="testcontainer"} 23
-container_fs_usage_bytes{device="sda2",id="testcontainer",name="testcontainer"} 38
-# HELP container_fs_write_seconds_total Cumulative count of seconds spent writing
-# TYPE container_fs_write_seconds_total counter
-container_fs_write_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.1e-08
-container_fs_write_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.6e-08
-# HELP container_fs_writes_merged_total Cumulative count of writes merged
-# TYPE container_fs_writes_merged_total counter
-container_fs_writes_merged_total{device="sda1",id="testcontainer",name="testcontainer"} 39
-container_fs_writes_merged_total{device="sda2",id="testcontainer",name="testcontainer"} 44
-# HELP container_fs_writes_total Cumulative count of writes completed
-# TYPE container_fs_writes_total counter
-container_fs_writes_total{device="sda1",id="testcontainer",name="testcontainer"} 28
-container_fs_writes_total{device="sda2",id="testcontainer",name="testcontainer"} 43
-# HELP container_last_seen Last time a container was seen by the exporter
-# TYPE container_last_seen gauge
-container_last_seen{id="testcontainer",name="testcontainer"} 1.426203694e+09
-# HELP container_memory_failures_total Cumulative count of memory allocation failures.
-# TYPE container_memory_failures_total counter
-container_memory_failures_total{id="testcontainer",name="testcontainer",scope="container",type="pgfault"} 10
-container_memory_failures_total{id="testcontainer",name="testcontainer",scope="container",type="pgmajfault"} 11
-container_memory_failures_total{id="testcontainer",name="testcontainer",scope="hierarchy",type="pgfault"} 12
-container_memory_failures_total{id="testcontainer",name="testcontainer",scope="hierarchy",type="pgmajfault"} 13
-# HELP container_memory_usage_bytes Current memory usage in bytes.
-# TYPE container_memory_usage_bytes gauge
-container_memory_usage_bytes{id="testcontainer",name="testcontainer"} 8
-# HELP container_memory_working_set_bytes Current working set in bytes.
-# TYPE container_memory_working_set_bytes gauge
-container_memory_working_set_bytes{id="testcontainer",name="testcontainer"} 9
-# HELP container_network_receive_bytes_total Cumulative count of bytes received
-# TYPE container_network_receive_bytes_total counter
-container_network_receive_bytes_total{id="testcontainer",name="testcontainer"} 14
-# HELP container_network_receive_errors_total Cumulative count of errors encountered while receiving
-# TYPE container_network_receive_errors_total counter
-container_network_receive_errors_total{id="testcontainer",name="testcontainer"} 16
-# HELP container_network_receive_packets_dropped_total Cumulative count of packets dropped while receiving
-# TYPE container_network_receive_packets_dropped_total counter
-container_network_receive_packets_dropped_total{id="testcontainer",name="testcontainer"} 17
-# HELP container_network_receive_packets_total Cumulative count of packets received
-# TYPE container_network_receive_packets_total counter
-container_network_receive_packets_total{id="testcontainer",name="testcontainer"} 15
-# HELP container_network_transmit_bytes_total Cumulative count of bytes transmitted
-# TYPE container_network_transmit_bytes_total counter
-container_network_transmit_bytes_total{id="testcontainer",name="testcontainer"} 18
-# HELP container_network_transmit_errors_total Cumulative count of errors encountered while transmitting
-# TYPE container_network_transmit_errors_total counter
-container_network_transmit_errors_total{id="testcontainer",name="testcontainer"} 20
-# HELP container_network_transmit_packets_dropped_total Cumulative count of packets dropped while transmitting
-# TYPE container_network_transmit_packets_dropped_total counter
-container_network_transmit_packets_dropped_total{id="testcontainer",name="testcontainer"} 21
-# HELP container_network_transmit_packets_total Cumulative count of packets transmitted
-# TYPE container_network_transmit_packets_total counter
-container_network_transmit_packets_total{id="testcontainer",name="testcontainer"} 19
-# HELP container_scrape_error 1 if there was an error while getting container metrics, 0 otherwise
-# TYPE container_scrape_error gauge
-container_scrape_error 0
-# HELP container_tasks_state Number of tasks in given state
-# TYPE container_tasks_state gauge
-container_tasks_state{id="testcontainer",name="testcontainer",state="iowaiting"} 54
-container_tasks_state{id="testcontainer",name="testcontainer",state="running"} 51
-container_tasks_state{id="testcontainer",name="testcontainer",state="sleeping"} 50
-container_tasks_state{id="testcontainer",name="testcontainer",state="stopped"} 52
-container_tasks_state{id="testcontainer",name="testcontainer",state="uninterruptible"} 53
-# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
-# TYPE http_request_duration_microseconds summary
-http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 0
-http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 0
-http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 0
-http_request_duration_microseconds_sum{handler="prometheus"} 0
-http_request_duration_microseconds_count{handler="prometheus"} 0
-# HELP http_request_size_bytes The HTTP request sizes in bytes.
-# TYPE http_request_size_bytes summary
-http_request_size_bytes{handler="prometheus",quantile="0.5"} 0
-http_request_size_bytes{handler="prometheus",quantile="0.9"} 0
-http_request_size_bytes{handler="prometheus",quantile="0.99"} 0
-http_request_size_bytes_sum{handler="prometheus"} 0
-http_request_size_bytes_count{handler="prometheus"} 0
-# HELP http_response_size_bytes The HTTP response sizes in bytes.
-# TYPE http_response_size_bytes summary
-http_response_size_bytes{handler="prometheus",quantile="0.5"} 0
-http_response_size_bytes{handler="prometheus",quantile="0.9"} 0
-http_response_size_bytes{handler="prometheus",quantile="0.99"} 0
-http_response_size_bytes_sum{handler="prometheus"} 0
-http_response_size_bytes_count{handler="prometheus"} 0
-# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
-# TYPE process_cpu_seconds_total counter
-process_cpu_seconds_total 0
-# HELP process_goroutines Number of goroutines that currently exist.
-# TYPE process_goroutines gauge
-process_goroutines 16
-# HELP process_max_fds Maximum number of open file descriptors.
-# TYPE process_max_fds gauge
-process_max_fds 1024
-# HELP process_open_fds Number of open file descriptors.
-# TYPE process_open_fds gauge
-process_open_fds 4
-# HELP process_resident_memory_bytes Resident memory size in bytes.
-# TYPE process_resident_memory_bytes gauge
-process_resident_memory_bytes 7.74144e+06
-# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
-# TYPE process_start_time_seconds gauge
-process_start_time_seconds 1.42620369439e+09
-# HELP process_virtual_memory_bytes Virtual memory size in bytes.
-# TYPE process_virtual_memory_bytes gauge
-process_virtual_memory_bytes 1.16420608e+08

Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/testdata/cpuinfo (generated, vendored; 251 lines)

@@ -1,251 +0,0 @@
-processor : 0
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 1596.000
-cache size : 12288 KB
-physical id : 0
-siblings : 6
-core id : 0
-cpu cores : 6
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 1
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 1596.000
-cache size : 12288 KB
-physical id : 0
-siblings : 6
-core id : 1
-cpu cores : 6
-apicid : 2
-initial apicid : 2
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 2
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 1596.000
-cache size : 12288 KB
-physical id : 0
-siblings : 6
-core id : 2
-cpu cores : 6
-apicid : 4
-initial apicid : 4
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 3
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 1596.000
-cache size : 12288 KB
-physical id : 1
-siblings : 6
-core id : 3
-cpu cores : 6
-apicid : 16
-initial apicid : 16
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 4
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 1596.000
-cache size : 12288 KB
-physical id : 1
-siblings : 6
-core id : 4
-cpu cores : 6
-apicid : 18
-initial apicid : 18
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 5
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 1596.000
-cache size : 12288 KB
-physical id : 1
-siblings : 6
-core id : 5
-cpu cores : 6
-apicid : 20
-initial apicid : 20
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 6
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 2661.000
-cache size : 12288 KB
-physical id : 0
-siblings : 6
-core id : 0
-cpu cores : 6
-apicid : 1
-initial apicid : 1
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 7
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 2661.000
-cache size : 12288 KB
-physical id : 0
-siblings : 6
-core id : 1
-cpu cores : 6
-apicid : 3
-initial apicid : 3
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 8
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 1596.000
-cache size : 12288 KB
-physical id : 0
-siblings : 6
-core id : 2
-cpu cores : 6
-apicid : 5
-initial apicid : 5
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 9
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 2661.000
-cache size : 12288 KB
-physical id : 1
-siblings : 6
-core id : 3
-cpu cores : 6
-apicid : 17
-initial apicid : 17
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-
-processor : 10
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 1596.000
-cache size : 12288 KB
-physical id : 1
-siblings : 6
-core id : 4
-cpu cores : 6
-apicid : 19
-initial apicid : 19
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual
-processor : 11
-cpu family : 6
-stepping : 2
-microcode : 0x10
-cpu MHz : 2661.000
-cache size : 12288 KB
-physical id : 1
-siblings : 6
-core id : 5
-cpu cores : 6
-apicid : 21
-initial apicid : 21
-fpu : yes
-fpu_exception : yes
-cpuid level : 11
-wp : yes
-bogomips : 5333.60
-clflush size : 64
-cache_alignment : 64
-address sizes : 40 bits physical, 48 bits virtual