mirror of https://github.com/k3s-io/k3s
cadvisor bump
parent 8fcb0dfb75
commit a5421270e5
@@ -281,93 +281,93 @@
    },
    {
        "ImportPath": "github.com/google/cadvisor/api",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/cache/memory",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/collector",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/container",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/events",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/fs",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/healthz",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/http",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/info/v1",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/info/v2",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/manager",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/metrics",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/pages",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/storage",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/summary",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/utils",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/validate",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/cadvisor/version",
        "Comment": "0.16.0.2",
        "Rev": "cefada41b87c35294533638733c563a349b95f05"
        "Comment": "0.19.0",
        "Rev": "0d6015c74114de9503d68c3c91efceee4dc41bd2"
    },
    {
        "ImportPath": "github.com/google/gofuzz",
@@ -42,6 +42,10 @@ var DockerNamespace = "docker"
var dockerRootDir = flag.String("docker_root", "/var/lib/docker", "Absolute path to the Docker state root directory (default: /var/lib/docker)")
var dockerRunDir = flag.String("docker_run", "/var/run/docker", "Absolute path to the Docker run directory (default: /var/run/docker)")

// Regexp that identifies docker cgroups, containers started with
// --cgroup-parent have another prefix than 'docker'
var dockerCgroupRegexp = regexp.MustCompile(`.+-([a-z0-9]{64})\.scope$`)

// TODO(vmarmol): Export run dir too for newer Dockers.
// Directory holding Docker container state information.
func DockerStateDir() string {
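Note: the dockerCgroupRegexp added above recognizes systemd-style docker cgroup names ("...-<64 hex chars>.scope") regardless of the prefix chosen via --cgroup-parent. A small standalone sketch of what it matches; the container id here is a made-up 64-character string, not part of the commit.

package main

import (
    "fmt"
    "path"
    "regexp"
    "strings"
)

// Same pattern as the vendored factory.go above.
var dockerCgroupRegexp = regexp.MustCompile(`.+-([a-z0-9]{64})\.scope$`)

func main() {
    id := strings.Repeat("a", 64) // made-up container id
    // A container started with --cgroup-parent=custom.slice still matches,
    // because only the "-<id>.scope" suffix is significant.
    name := "/custom.slice/docker-" + id + ".scope"
    if m := dockerCgroupRegexp.FindStringSubmatch(path.Base(name)); m != nil {
        fmt.Println("docker id:", m[1])
    }
}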
@@ -119,27 +123,30 @@ func ContainerNameToDockerId(name string) string {
    // Turn systemd cgroup name into Docker ID.
    if UseSystemd() {
        id = strings.TrimPrefix(id, "docker-")
        id = strings.TrimSuffix(id, ".scope")
        if matches := dockerCgroupRegexp.FindStringSubmatch(id); matches != nil {
            id = matches[1]
        }
    }

    return id
}

// Returns a full container name for the specified Docker ID.
func FullContainerName(dockerId string) string {
// Add the full container name.
func isContainerName(name string) bool {
    if UseSystemd() {
        return path.Join("/system.slice", fmt.Sprintf("docker-%s.scope", dockerId))
    } else {
        return path.Join("/docker", dockerId)
        return dockerCgroupRegexp.MatchString(path.Base(name))
    }
    return true
}

// Docker handles all containers under /docker
func (self *dockerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
    // docker factory accepts all containers it can handle.
    canAccept := true

    if !isContainerName(name) {
        return false, canAccept, fmt.Errorf("invalid container name")
    }

    // Check if the container is known to docker and it is active.
    id := ContainerNameToDockerId(name)

Godeps/_workspace/src/github.com/google/cadvisor/container/docker/fsHandler.go (generated, vendored, new file, 109 lines)
@@ -0,0 +1,109 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Handler for Docker containers.
package docker

import (
    "sync"
    "time"

    "github.com/golang/glog"
    "github.com/google/cadvisor/fs"
)

type fsHandler interface {
    start()
    usage() uint64
    stop()
}

type realFsHandler struct {
    sync.RWMutex
    lastUpdate  time.Time
    usageBytes  uint64
    period      time.Duration
    storageDirs []string
    fsInfo      fs.FsInfo
    // Tells the container to stop.
    stopChan chan struct{}
}

const longDu = time.Second

var _ fsHandler = &realFsHandler{}

func newFsHandler(period time.Duration, storageDirs []string, fsInfo fs.FsInfo) fsHandler {
    return &realFsHandler{
        lastUpdate:  time.Time{},
        usageBytes:  0,
        period:      period,
        storageDirs: storageDirs,
        fsInfo:      fsInfo,
        stopChan:    make(chan struct{}, 1),
    }
}

func (fh *realFsHandler) needsUpdate() bool {
    return time.Now().After(fh.lastUpdate.Add(fh.period))
}

func (fh *realFsHandler) update() error {
    var usage uint64
    for _, dir := range fh.storageDirs {
        // TODO(Vishh): Add support for external mounts.
        dirUsage, err := fh.fsInfo.GetDirUsage(dir)
        if err != nil {
            return err
        }
        usage += dirUsage
    }
    fh.Lock()
    defer fh.Unlock()
    fh.lastUpdate = time.Now()
    fh.usageBytes = usage
    return nil
}

func (fh *realFsHandler) trackUsage() {
    for {
        start := time.Now()
        if _, ok := <-fh.stopChan; !ok {
            return
        }
        if err := fh.update(); err != nil {
            glog.V(2).Infof("failed to collect filesystem stats - %v", err)
        }
        duration := time.Since(start)
        if duration > longDu {
            glog.V(3).Infof("`du` on following dirs took %v: %v", duration, fh.storageDirs)
        }
        next := start.Add(fh.period)
        time.Sleep(next.Sub(time.Now()))
    }
}

func (fh *realFsHandler) start() {
    go fh.trackUsage()
}

func (fh *realFsHandler) stop() {
    close(fh.stopChan)
}

func (fh *realFsHandler) usage() uint64 {
    fh.RLock()
    defer fh.RUnlock()
    return fh.usageBytes
}
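Note: the new fsHandler above moves the expensive `du` walk off the stats path: a background goroutine refreshes a cached byte count that usage() serves under a read lock, and getFsStats (later in this commit) reads that cache instead of walking the directories on every scrape. Below is a standalone sketch of the same pattern; usageTracker and diskUsage are illustrative names, and diskUsage is a stand-in for fs.FsInfo.GetDirUsage, which is not available outside the vendored tree.

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "sync"
    "time"
)

// usageTracker mirrors the fsHandler idea: poll directory usage in the
// background and serve the last known value cheaply.
type usageTracker struct {
    sync.RWMutex
    bytes uint64
    dirs  []string
    stop  chan struct{}
}

// diskUsage is a stand-in for fs.FsInfo.GetDirUsage (which shells out to du).
func diskUsage(dir string) (uint64, error) {
    var total uint64
    err := filepath.Walk(dir, func(_ string, fi os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        if !fi.IsDir() {
            total += uint64(fi.Size())
        }
        return nil
    })
    return total, err
}

// track refreshes the cached total on every tick until stop is closed.
func (u *usageTracker) track(period time.Duration) {
    ticker := time.NewTicker(period)
    defer ticker.Stop()
    for {
        select {
        case <-u.stop:
            return
        case <-ticker.C:
            var sum uint64
            for _, d := range u.dirs {
                if n, err := diskUsage(d); err == nil {
                    sum += n
                }
            }
            u.Lock()
            u.bytes = sum
            u.Unlock()
        }
    }
}

// usage returns the last cached value without touching the filesystem.
func (u *usageTracker) usage() uint64 {
    u.RLock()
    defer u.RUnlock()
    return u.bytes
}

func main() {
    t := &usageTracker{dirs: []string{os.TempDir()}, stop: make(chan struct{})}
    go t.track(2 * time.Second)
    time.Sleep(3 * time.Second)
    fmt.Println("cached usage bytes:", t.usage())
    close(t.stop)
}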
@@ -25,7 +25,7 @@ import (
    "github.com/docker/libcontainer/cgroups"
    cgroup_fs "github.com/docker/libcontainer/cgroups/fs"
    libcontainerConfigs "github.com/docker/libcontainer/configs"
    "github.com/fsouza/go-dockerclient"
    docker "github.com/fsouza/go-dockerclient"
    "github.com/google/cadvisor/container"
    containerLibcontainer "github.com/google/cadvisor/container/libcontainer"
    "github.com/google/cadvisor/fs"
@@ -83,6 +83,9 @@ type dockerContainerHandler struct {
    // The network mode of the container
    networkMode string

    // Filesystem handler.
    fsHandler fsHandler
}

func newDockerContainerHandler(
@@ -114,6 +117,9 @@ func newDockerContainerHandler(
    }

    id := ContainerNameToDockerId(name)

    storageDirs := []string{path.Join(*dockerRootDir, pathToAufsDir, id)}

    handler := &dockerContainerHandler{
        id:     id,
        client: client,
@@ -124,8 +130,13 @@ func newDockerContainerHandler(
        usesAufsDriver: usesAufsDriver,
        fsInfo:         fsInfo,
        rootFs:         rootFs,
        storageDirs:    storageDirs,
        fsHandler:      newFsHandler(time.Minute, storageDirs, fsInfo),
    }

    if usesAufsDriver {
        handler.fsHandler.start()
    }
    handler.storageDirs = append(handler.storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id))

    // We assume that if Inspect fails then the container is not known to docker.
    ctnr, err := client.InspectContainer(id)
@@ -136,8 +147,7 @@ func newDockerContainerHandler(
    handler.pid = ctnr.State.Pid

    // Add the name and bare ID as aliases of the container.
    handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"))
    handler.aliases = append(handler.aliases, id)
    handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"), id)
    handler.labels = ctnr.Config.Labels
    handler.image = ctnr.Config.Image
    handler.networkMode = ctnr.HostConfig.NetworkMode
@@ -256,16 +266,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
    fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

    var usage uint64 = 0
    for _, dir := range self.storageDirs {
        // TODO(Vishh): Add support for external mounts.
        dirUsage, err := self.fsInfo.GetDirUsage(dir)
        if err != nil {
            return err
        }
        usage += dirUsage
    }
    fsStat.Usage = usage
    fsStat.Usage = self.fsHandler.usage()
    stats.Filesystem = append(stats.Filesystem, fsStat)

    return nil
@@ -295,32 +296,8 @@ func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
}

func (self *dockerContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
    if self.name != "/docker" {
        return []info.ContainerReference{}, nil
    }
    opt := docker.ListContainersOptions{
        All: true,
    }
    containers, err := self.client.ListContainers(opt)
    if err != nil {
        return nil, err
    }

    ret := make([]info.ContainerReference, 0, len(containers)+1)
    for _, c := range containers {
        if !strings.HasPrefix(c.Status, "Up ") {
            continue
        }

        ref := info.ContainerReference{
            Name:      FullContainerName(c.ID),
            Aliases:   append(c.Names, c.ID),
            Namespace: DockerNamespace,
        }
        ret = append(ret, ref)
    }

    return ret, nil
    // No-op for Docker driver.
    return []info.ContainerReference{}, nil
}

func (self *dockerContainerHandler) GetCgroupPath(resource string) (string, error) {
Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go (generated, vendored, 87 changes)
@@ -97,6 +97,20 @@ func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int) (*info.Cont
    } else {
        stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
    }

    t, err := tcpStatsFromProc(rootFs, pid, "net/tcp")
    if err != nil {
        glog.V(2).Infof("Unable to get tcp stats from pid %d: %v", pid, err)
    } else {
        stats.Network.Tcp = t
    }

    t6, err := tcpStatsFromProc(rootFs, pid, "net/tcp6")
    if err != nil {
        glog.V(2).Infof("Unable to get tcp6 stats from pid %d: %v", pid, err)
    } else {
        stats.Network.Tcp6 = t6
    }
}

// For backwards compatibility.
@@ -173,6 +187,78 @@ func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) {
    return stats, nil
}

func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error) {
    tcpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

    tcpStats, err := scanTcpStats(tcpStatsFile)
    if err != nil {
        return tcpStats, fmt.Errorf("couldn't read tcp stats: %v", err)
    }

    return tcpStats, nil
}

func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {

    var stats info.TcpStat

    data, err := ioutil.ReadFile(tcpStatsFile)
    if err != nil {
        return stats, fmt.Errorf("failure opening %s: %v", tcpStatsFile, err)
    }

    tcpStatLineRE, _ := regexp.Compile("[0-9:].*")

    tcpStateMap := map[string]uint64{
        "01": 0, //ESTABLISHED
        "02": 0, //SYN_SENT
        "03": 0, //SYN_RECV
        "04": 0, //FIN_WAIT1
        "05": 0, //FIN_WAIT2
        "06": 0, //TIME_WAIT
        "07": 0, //CLOSE
        "08": 0, //CLOSE_WAIT
        "09": 0, //LAST_ACK
        "0A": 0, //LISTEN
        "0B": 0, //CLOSING
    }

    reader := strings.NewReader(string(data))
    scanner := bufio.NewScanner(reader)

    scanner.Split(bufio.ScanLines)

    for scanner.Scan() {

        line := scanner.Text()
        //skip header
        matched := tcpStatLineRE.MatchString(line)

        if matched {
            state := strings.Fields(line)
            //#file header tcp state is the 4 filed:
            //sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
            tcpStateMap[state[3]]++
        }
    }

    stats = info.TcpStat{
        Established: tcpStateMap["01"],
        SynSent:     tcpStateMap["02"],
        SynRecv:     tcpStateMap["03"],
        FinWait1:    tcpStateMap["04"],
        FinWait2:    tcpStateMap["05"],
        TimeWait:    tcpStateMap["06"],
        Close:       tcpStateMap["07"],
        CloseWait:   tcpStateMap["08"],
        LastAck:     tcpStateMap["09"],
        Listen:      tcpStateMap["0A"],
        Closing:     tcpStateMap["0B"],
    }

    return stats, nil
}

func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
    pids, err := cgroupManager.GetPids()
    if err != nil {
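Note: scanTcpStats above tallies the hexadecimal `st` column (the fourth whitespace-separated field) of /proc/<pid>/net/tcp into per-state counters. Here is a standalone sketch of that parsing over a fabricated sample; the header-skipping check is simplified compared with the vendored regexp, and the sample rows are made up.

package main

import (
    "bufio"
    "fmt"
    "strings"
)

// Two fabricated /proc/net/tcp rows: state 0A (LISTEN) and 01 (ESTABLISHED).
const sample = `  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
   0: 0100007F:0FC8 00000000:0000 0A 00000000:00000000 00:00000000 00000000  1000        0 12345 1 0000000000000000 100 0 0 10 0
   1: 0100007F:0FC8 0100007F:9B2E 01 00000000:00000000 00:00000000 00000000  1000        0 12346 1 0000000000000000 20 4 30 10 -1`

func main() {
    counts := map[string]uint64{}
    scanner := bufio.NewScanner(strings.NewReader(sample))
    for scanner.Scan() {
        fields := strings.Fields(scanner.Text())
        // Skip the header; data rows start with "<n>:".
        if len(fields) < 4 || !strings.HasSuffix(fields[0], ":") {
            continue
        }
        counts[fields[3]]++ // hex connection state, e.g. 01=ESTABLISHED, 0A=LISTEN
    }
    fmt.Println(counts) // map[01:1 0A:1]
}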
@@ -262,6 +348,7 @@ func toContainerStats1(s *cgroups.Stats, ret *info.ContainerStats) {

func toContainerStats2(s *cgroups.Stats, ret *info.ContainerStats) {
    ret.Memory.Usage = s.MemoryStats.Usage
    ret.Memory.Failcnt = s.MemoryStats.Failcnt
    if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
        ret.Memory.ContainerData.Pgfault = v
        ret.Memory.HierarchicalData.Pgfault = v
@@ -282,7 +282,7 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
}

func (self *RealFsInfo) GetDirUsage(dir string) (uint64, error) {
    out, err := exec.Command("du", "-s", dir).CombinedOutput()
    out, err := exec.Command("nice", "-n", "19", "du", "-s", dir).CombinedOutput()
    if err != nil {
        return 0, fmt.Errorf("du command failed on %s with output %s - %s", dir, out, err)
    }
@@ -31,7 +31,7 @@ import (
    "github.com/prometheus/client_golang/prometheus"
)

func RegisterHandlers(mux httpMux.Mux, containerManager manager.Manager, httpAuthFile, httpAuthRealm, httpDigestFile, httpDigestRealm, prometheusEndpoint string) error {
func RegisterHandlers(mux httpMux.Mux, containerManager manager.Manager, httpAuthFile, httpAuthRealm, httpDigestFile, httpDigestRealm string) error {
    // Basic health handler.
    if err := healthz.RegisterHandler(mux); err != nil {
        return fmt.Errorf("failed to register healthz handler: %s", err)
@@ -85,13 +85,15 @@ func RegisterHandlers(mux httpMux.Mux, containerManager manager.Manager, httpAut
        }
    }

    collector := metrics.NewPrometheusCollector(containerManager)
    prometheus.MustRegister(collector)
    http.Handle(prometheusEndpoint, prometheus.Handler())

    return nil
}

func RegisterPrometheusHandler(mux httpMux.Mux, containerManager manager.Manager, prometheusEndpoint string, containerNameToLabelsFunc metrics.ContainerNameToLabelsFunc) {
    collector := metrics.NewPrometheusCollector(containerManager, containerNameToLabelsFunc)
    prometheus.MustRegister(collector)
    mux.Handle(prometheusEndpoint, prometheus.Handler())
}

func staticHandlerNoAuth(w http.ResponseWriter, r *http.Request) {
    err := static.HandleRequest(w, r.URL)
    if err != nil {
@@ -312,6 +312,8 @@ type MemoryStats struct {
    // Units: Bytes.
    WorkingSet uint64 `json:"working_set"`

    Failcnt uint64 `json:"failcnt"`

    ContainerData    MemoryStatsMemoryData `json:"container_data,omitempty"`
    HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"`
}
@@ -345,6 +347,35 @@ type InterfaceStats struct {
type NetworkStats struct {
    InterfaceStats `json:",inline"`
    Interfaces []InterfaceStats `json:"interfaces,omitempty"`
    // TCP connection stats (Established, Listen...)
    Tcp TcpStat `json:"tcp"`
    // TCP6 connection stats (Established, Listen...)
    Tcp6 TcpStat `json:"tcp6"`
}

type TcpStat struct {
    //Count of TCP connections in state "Established"
    Established uint64
    //Count of TCP connections in state "Syn_Sent"
    SynSent uint64
    //Count of TCP connections in state "Syn_Recv"
    SynRecv uint64
    //Count of TCP connections in state "Fin_Wait1"
    FinWait1 uint64
    //Count of TCP connections in state "Fin_Wait2"
    FinWait2 uint64
    //Count of TCP connections in state "Time_Wait
    TimeWait uint64
    //Count of TCP connections in state "Close"
    Close uint64
    //Count of TCP connections in state "Close_Wait"
    CloseWait uint64
    //Count of TCP connections in state "Listen_Ack"
    LastAck uint64
    //Count of TCP connections in state "Listen"
    Listen uint64
    //Count of TCP connections in state "Closing"
    Closing uint64
}

type FsStats struct {
@@ -179,6 +179,8 @@ type VersionInfo struct {

    // cAdvisor version.
    CadvisorVersion string `json:"cadvisor_version"`
    // cAdvisor git revision.
    CadvisorRevision string `json:"cadvisor_revision"`
}

type MachineInfoFactory interface {
@@ -206,9 +206,27 @@ type ProcessInfo struct {
    Cmd string `json:"cmd"`
}

type TcpStat struct {
    Established uint64
    SynSent     uint64
    SynRecv     uint64
    FinWait1    uint64
    FinWait2    uint64
    TimeWait    uint64
    Close       uint64
    CloseWait   uint64
    LastAck     uint64
    Listen      uint64
    Closing     uint64
}

type NetworkStats struct {
    // Network stats by interface.
    Interfaces []v1.InterfaceStats `json:"interfaces,omitempty"`
    // TCP connection stats (Established, Listen...)
    Tcp TcpStat `json:"tcp"`
    // TCP6 connection stats (Established, Listen...)
    Tcp6 TcpStat `json:"tcp6"`
}

// Instantaneous CPU stats
@@ -42,7 +42,7 @@ import (
// Housekeeping interval.
var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings")

var cgroupPathRegExp = regexp.MustCompile(".*devices:(.*?)[,;$].*")
var cgroupPathRegExp = regexp.MustCompile(".*devices.*:(.*?)[,;$].*")

// Decay value used for load average smoothing. Interval length of 10 seconds is used.
var loadDecay = math.Exp(float64(-1 * (*HousekeepingInterval).Seconds() / 10))
@@ -122,7 +122,8 @@ func getVersionInfo() (*info.VersionInfo, error) {
        KernelVersion:      kernel_version,
        ContainerOsVersion: container_os,
        DockerVersion:      docker_version,
        CadvisorVersion:    version.VERSION,
        CadvisorVersion:    version.Info["version"],
        CadvisorRevision:   version.Info["revision"],
    }, nil
}

@@ -24,9 +24,13 @@ import (
)

// This will usually be manager.Manager, but can be swapped out for testing.
type subcontainersInfoProvider interface {
type infoProvider interface {
    // Get information about all subcontainers of the specified container (includes self).
    SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error)
    // Get information about the version.
    GetVersionInfo() (*info.VersionInfo, error)
    // Get information about the machine.
    GetMachineInfo() (*info.MachineInfo, error)
}

// metricValue describes a single metric value for a given set of label values
@@ -60,21 +64,25 @@ type containerMetric struct {
    getValues func(s *info.ContainerStats) metricValues
}

func (cm *containerMetric) desc() *prometheus.Desc {
    return prometheus.NewDesc(cm.name, cm.help, append([]string{"name", "id", "image"}, cm.extraLabels...), nil)
func (cm *containerMetric) desc(baseLabels []string) *prometheus.Desc {
    return prometheus.NewDesc(cm.name, cm.help, append(baseLabels, cm.extraLabels...), nil)
}

type ContainerNameToLabelsFunc func(containerName string) map[string]string

// PrometheusCollector implements prometheus.Collector.
type PrometheusCollector struct {
    infoProvider     subcontainersInfoProvider
    errors           prometheus.Gauge
    containerMetrics []containerMetric
    infoProvider          infoProvider
    errors                prometheus.Gauge
    containerMetrics      []containerMetric
    containerNameToLabels ContainerNameToLabelsFunc
}

// NewPrometheusCollector returns a new PrometheusCollector.
func NewPrometheusCollector(infoProvider subcontainersInfoProvider) *PrometheusCollector {
func NewPrometheusCollector(infoProvider infoProvider, f ContainerNameToLabelsFunc) *PrometheusCollector {
    c := &PrometheusCollector{
        infoProvider: infoProvider,
        infoProvider:          infoProvider,
        containerNameToLabels: f,
        errors: prometheus.NewGauge(prometheus.GaugeOpts{
            Namespace: "container",
            Name:      "scrape_error",
@@ -117,6 +125,13 @@ func NewPrometheusCollector(infoProvider subcontainersInfoProvider) *PrometheusC
            }
            return values
        },
    }, {
        name:      "container_memory_failcnt",
        help:      "Number of memory usage hits limits",
        valueType: prometheus.CounterValue,
        getValues: func(s *info.ContainerStats) metricValues {
            return metricValues{{value: float64(s.Memory.Failcnt)}}
        },
    }, {
        name: "container_memory_usage_bytes",
        help: "Current memory usage in bytes.",
@@ -441,18 +456,34 @@ func NewPrometheusCollector(infoProvider subcontainersInfoProvider) *PrometheusC
    return c
}

var (
    versionInfoDesc = prometheus.NewDesc("cadvisor_version_info", "A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision.", []string{"kernelVersion", "osVersion", "dockerVersion", "cadvisorVersion", "cadvisorRevision"}, nil)
    machineInfoCoresDesc = prometheus.NewDesc("machine_cpu_cores", "Number of CPU cores on the machine.", nil, nil)
    machineInfoMemoryDesc = prometheus.NewDesc("machine_memory_bytes", "Amount of memory installed on the machine.", nil, nil)
)

// Describe describes all the metrics ever exported by cadvisor. It
// implements prometheus.PrometheusCollector.
func (c *PrometheusCollector) Describe(ch chan<- *prometheus.Desc) {
    c.errors.Describe(ch)
    for _, cm := range c.containerMetrics {
        ch <- cm.desc()
        ch <- cm.desc([]string{})
    }
    ch <- versionInfoDesc
    ch <- machineInfoCoresDesc
    ch <- machineInfoMemoryDesc
}

// Collect fetches the stats from all containers and delivers them as
// Prometheus metrics. It implements prometheus.PrometheusCollector.
func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
    c.collectMachineInfo(ch)
    c.collectVersionInfo(ch)
    c.collectContainersInfo(ch)
    c.errors.Collect(ch)
}

func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) {
    containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
    if err != nil {
        c.errors.Set(1)
@@ -460,20 +491,82 @@ func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
        return
    }
    for _, container := range containers {
        baseLabels := []string{"id"}
        id := container.Name
        name := id
        if len(container.Aliases) > 0 {
            name = container.Aliases[0]
            baseLabels = append(baseLabels, "name")
        }
        image := container.Spec.Image
        stats := container.Stats[0]
        if len(image) > 0 {
            baseLabels = append(baseLabels, "image")
        }
        baseLabelValues := []string{id, name, image}[:len(baseLabels)]

        if c.containerNameToLabels != nil {
            newLabels := c.containerNameToLabels(name)
            for k, v := range newLabels {
                baseLabels = append(baseLabels, k)
                baseLabelValues = append(baseLabelValues, v)
            }
        }

        // Container spec
        desc := prometheus.NewDesc("container_start_time_seconds", "Start time of the container since unix epoch in seconds.", baseLabels, nil)
        ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), baseLabelValues...)

        if container.Spec.HasCpu {
            desc := prometheus.NewDesc("container_spec_cpu_shares", "CPU share of the container.", baseLabels, nil)
            ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Limit), baseLabelValues...)
        }

        if container.Spec.HasMemory {
            desc := prometheus.NewDesc("container_spec_memory_limit_bytes", "Memory limit for the container.", baseLabels, nil)
            ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Limit), baseLabelValues...)
            desc = prometheus.NewDesc("container_spec_memory_swap_limit_bytes", "Memory swap limit for the container.", baseLabels, nil)
            ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.SwapLimit), baseLabelValues...)
        }

        // Now for the actual metrics
        stats := container.Stats[0]
        for _, cm := range c.containerMetrics {
            desc := cm.desc()
            desc := cm.desc(baseLabels)
            for _, metricValue := range cm.getValues(stats) {
                ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append([]string{name, id, image}, metricValue.labels...)...)
                ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append(baseLabelValues, metricValue.labels...)...)
            }
        }
    }
    c.errors.Collect(ch)
}

func (c *PrometheusCollector) collectVersionInfo(ch chan<- prometheus.Metric) {
    versionInfo, err := c.infoProvider.GetVersionInfo()
    if err != nil {
        c.errors.Set(1)
        glog.Warningf("Couldn't get version info: %s", err)
        return
    }
    ch <- prometheus.MustNewConstMetric(versionInfoDesc, prometheus.GaugeValue, 1, []string{versionInfo.KernelVersion, versionInfo.ContainerOsVersion, versionInfo.DockerVersion, versionInfo.CadvisorVersion, versionInfo.CadvisorRevision}...)
}

func (c *PrometheusCollector) collectMachineInfo(ch chan<- prometheus.Metric) {
    machineInfo, err := c.infoProvider.GetMachineInfo()
    if err != nil {
        c.errors.Set(1)
        glog.Warningf("Couldn't get machine info: %s", err)
        return
    }
    ch <- prometheus.MustNewConstMetric(machineInfoCoresDesc, prometheus.GaugeValue, float64(machineInfo.NumCores))
    ch <- prometheus.MustNewConstMetric(machineInfoMemoryDesc, prometheus.GaugeValue, float64(machineInfo.MemoryCapacity))
}

// Size after which we consider memory to be "unlimited". This is not
// MaxInt64 due to rounding by the kernel.
const maxMemorySize = uint64(1 << 62)

func specMemoryValue(v uint64) float64 {
    if v > maxMemorySize {
        return 0
    }
    return float64(v)
}
@@ -53,11 +53,11 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error
    start := time.Now()

    // The container name is the path after the handler
    containerName := u.Path[len(DockerPage):]
    rootDir := getRootDir(u.Path)
    containerName := u.Path[len(DockerPage)-1:]
    rootDir := getRootDir(containerName)

    var data *pageData
    if containerName == "" {
    if containerName == "/" {
        // Get the containers.
        reqParams := info.ContainerInfoRequest{
            NumStats: 0,
@@ -70,7 +70,7 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error
        for _, cont := range conts {
            subcontainers = append(subcontainers, link{
                Text: getContainerDisplayName(cont.ContainerReference),
                Link: path.Join("/docker", docker.ContainerNameToDockerId(cont.ContainerReference.Name)),
                Link: path.Join(rootDir, DockerPage, docker.ContainerNameToDockerId(cont.ContainerReference.Name)),
            })
        }

@@ -93,7 +93,7 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error
            ParentContainers: []link{
                {
                    Text: dockerContainersText,
                    Link: DockerPage,
                    Link: path.Join(rootDir, DockerPage),
                }},
            Subcontainers: subcontainers,
            Root:          rootDir,
@@ -106,7 +106,7 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error
        reqParams := info.ContainerInfoRequest{
            NumStats: 60,
        }
        cont, err := m.DockerContainer(containerName, &reqParams)
        cont, err := m.DockerContainer(containerName[1:], &reqParams)
        if err != nil {
            return fmt.Errorf("failed to get container %q with error: %v", containerName, err)
        }
@@ -115,12 +115,12 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error
        // Make a list of the parent containers and their links
        var parentContainers []link
        parentContainers = append(parentContainers, link{
            Text: "Docker containers",
            Link: DockerPage,
            Text: "Docker Containers",
            Link: path.Join(rootDir, DockerPage),
        })
        parentContainers = append(parentContainers, link{
            Text: displayName,
            Link: path.Join(DockerPage, docker.ContainerNameToDockerId(cont.Name)),
            Link: path.Join(rootDir, DockerPage, docker.ContainerNameToDockerId(cont.Name)),
        })

        // Get the MachineInfo
@@ -15,10 +15,10 @@
package bigquery

import (
    bigquery "code.google.com/p/google-api-go-client/bigquery/v2"
    info "github.com/google/cadvisor/info/v1"
    "github.com/google/cadvisor/storage"
    "github.com/google/cadvisor/storage/bigquery/client"
    bigquery "google.golang.org/api/bigquery/v2"
)

type bigqueryStorage struct {
Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/client/client.go (generated, vendored, 2 changes)
@@ -20,9 +20,9 @@ import (
    "io/ioutil"
    "strings"

    bigquery "code.google.com/p/google-api-go-client/bigquery/v2"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/jwt"
    bigquery "google.golang.org/api/bigquery/v2"
)

var (
Godeps/_workspace/src/github.com/google/cadvisor/storage/elasticsearch/elasticsearch.go (generated, vendored, new file, 127 lines)
@@ -0,0 +1,127 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package elasticsearch

import (
    "fmt"
    "sync"
    "time"

    info "github.com/google/cadvisor/info/v1"
    storage "github.com/google/cadvisor/storage"
    "gopkg.in/olivere/elastic.v2"
)

type elasticStorage struct {
    client      *elastic.Client
    machineName string
    indexName   string
    typeName    string
    lock        sync.Mutex
}

type detailSpec struct {
    Timestamp      int64                `json:"timestamp"`
    MachineName    string               `json:"machine_name,omitempty"`
    ContainerName  string               `json:"container_Name,omitempty"`
    ContainerStats *info.ContainerStats `json:"container_stats,omitempty"`
}

func (self *elasticStorage) containerStatsAndDefaultValues(
    ref info.ContainerReference, stats *info.ContainerStats) *detailSpec {
    timestamp := stats.Timestamp.UnixNano() / 1E3
    var containerName string
    if len(ref.Aliases) > 0 {
        containerName = ref.Aliases[0]
    } else {
        containerName = ref.Name
    }
    detail := &detailSpec{
        Timestamp:      timestamp,
        MachineName:    self.machineName,
        ContainerName:  containerName,
        ContainerStats: stats,
    }
    return detail
}

func (self *elasticStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
    if stats == nil {
        return nil
    }
    func() {
        // AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write.
        self.lock.Lock()
        defer self.lock.Unlock()
        // Add some default params based on ContainerStats
        detail := self.containerStatsAndDefaultValues(ref, stats)
        // Index a cadvisor (using JSON serialization)
        _, err := self.client.Index().
            Index(self.indexName).
            Type(self.typeName).
            BodyJson(detail).
            Do()
        if err != nil {
            // Handle error
            panic(fmt.Errorf("failed to write stats to ElasticSearch- %s", err))
        }
    }()
    return nil
}

func (self *elasticStorage) Close() error {
    self.client = nil
    return nil
}

// machineName: A unique identifier to identify the host that current cAdvisor
// instance is running on.
// ElasticHost: The host which runs ElasticSearch.
func New(machineName,
    indexName,
    typeName,
    elasticHost string,
    enableSniffer bool,
) (storage.StorageDriver, error) {
    // Obtain a client and connect to the default Elasticsearch installation
    // on 127.0.0.1:9200. Of course you can configure your client to connect
    // to other hosts and configure it in various other ways.
    client, err := elastic.NewClient(
        elastic.SetHealthcheck(true),
        elastic.SetSniff(enableSniffer),
        elastic.SetHealthcheckInterval(30*time.Second),
        elastic.SetURL(elasticHost),
    )
    if err != nil {
        // Handle error
        panic(err)
    }

    // Ping the Elasticsearch server to get e.g. the version number
    info, code, err := client.Ping().URL(elasticHost).Do()
    if err != nil {
        // Handle error
        panic(err)
    }
    fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number)

    ret := &elasticStorage{
        client:      client,
        machineName: machineName,
        indexName:   indexName,
        typeName:    typeName,
    }
    return ret, nil
}
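Note: the exported constructor above takes a machine name, index, type, Elasticsearch URL and a sniffing flag, and returns a storage.StorageDriver. A hedged usage sketch follows; the flag names are illustrative (they are not cAdvisor's real storage-driver flags), and it requires a reachable Elasticsearch, since New panics when the initial health check fails.

package main

import (
    "flag"
    "log"

    info "github.com/google/cadvisor/info/v1"
    "github.com/google/cadvisor/storage/elasticsearch"
)

// Illustrative flags for this sketch only.
var (
    elasticHost = flag.String("elastic_host", "http://localhost:9200", "Elasticsearch endpoint")
    indexName   = flag.String("elastic_index", "cadvisor", "index to write stats into")
    typeName    = flag.String("elastic_type", "stats", "document type to use")
)

func main() {
    flag.Parse()
    driver, err := elasticsearch.New("my-machine", *indexName, *typeName, *elasticHost, false)
    if err != nil {
        log.Fatalf("failed to create elasticsearch storage: %v", err)
    }
    defer driver.Close()

    // Push one (empty) sample so the wiring is exercised end to end.
    ref := info.ContainerReference{Name: "/docker/abc123"}
    if err := driver.AddStats(ref, &info.ContainerStats{}); err != nil {
        log.Fatalf("failed to write stats: %v", err)
    }
}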
@@ -164,7 +164,7 @@ func (self *OomParser) StreamOoms(outStream chan *OomInstance) {
}

func callJournalctl() (io.ReadCloser, error) {
    cmd := exec.Command("journalctl", "-f")
    cmd := exec.Command("journalctl", "-k", "-f")
    readcloser, err := cmd.StdoutPipe()
    if err != nil {
        return nil, err
@@ -0,0 +1 @@
0.18.0
@@ -14,5 +14,22 @@

package version

// Version of cAdvisor.
const VERSION = "0.16.0"
// Build information. Populated at build-time.
var (
    Version   string
    Revision  string
    Branch    string
    BuildUser string
    BuildDate string
    GoVersion string
)

// Info provides the iterable version information.
var Info = map[string]string{
    "version":   Version,
    "revision":  Revision,
    "branch":    Branch,
    "buildUser": BuildUser,
    "buildDate": BuildDate,
    "goVersion": GoVersion,
}
@@ -21,6 +21,7 @@ package cadvisor
import (
    "fmt"
    "net/http"
    "regexp"
    "time"

    "github.com/golang/glog"
@@ -79,11 +80,24 @@ func (cc *cadvisorClient) exportHTTP(port uint) error {
    // Register the handlers regardless as this registers the prometheus
    // collector properly.
    mux := http.NewServeMux()
    err := cadvisorHttp.RegisterHandlers(mux, cc, "", "", "", "", "/metrics")
    err := cadvisorHttp.RegisterHandlers(mux, cc, "", "", "", "")
    if err != nil {
        return err
    }

    re := regexp.MustCompile(`^k8s_(?P<kubernetes_container_name>[^_\.]+)[^_]+_(?P<kubernetes_pod_name>[^_]+)_(?P<kubernetes_namespace>[^_]+)`)
    reCaptureNames := re.SubexpNames()
    cadvisorHttp.RegisterPrometheusHandler(mux, cc, "/metrics", func(name string) map[string]string {
        extraLabels := map[string]string{}
        matches := re.FindStringSubmatch(name)
        for i, match := range matches {
            if len(reCaptureNames[i]) > 0 {
                extraLabels[re.SubexpNames()[i]] = match
            }
        }
        return extraLabels
    })

    // Only start the http server if port > 0
    if port > 0 {
        serv := &http.Server{