Merge pull request #5308 from vmarmol/cadvisor-build

Run cAdvisor inside Kubelet
Dawn Chen 2015-03-13 16:41:40 -07:00
commit 39b52e92a9
36 changed files with 1021 additions and 617 deletions

Godeps/Godeps.json (generated)

@ -185,83 +185,83 @@
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/api",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
-	{
-		"ImportPath": "github.com/google/cadvisor/client",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
-	},
 	{
 		"ImportPath": "github.com/google/cadvisor/container",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/events",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/fs",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/healthz",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/http",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/info/v1",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/info/v2",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/manager",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
+	{
+		"ImportPath": "github.com/google/cadvisor/metrics",
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+	},
 	{
 		"ImportPath": "github.com/google/cadvisor/pages",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/storage",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/summary",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/utils",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/validate",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/version",
-		"Comment": "0.10.1-42-g4e2479b",
-		"Rev": "4e2479bcabe7af08825066f5ece2122553562b34"
+		"Comment": "0.10.1-62-ge78e515",
+		"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
 	},
 	{
 		"ImportPath": "github.com/google/gofuzz",


@ -151,9 +151,9 @@ func streamResults(results chan *events.Event, w http.ResponseWriter, r *http.Re
for { for {
select { select {
case <-cn.CloseNotify(): case <-cn.CloseNotify():
glog.Infof("Client stopped listening")
return nil return nil
case ev := <-results: case ev := <-results:
glog.V(3).Infof("Received event from watch channel in api: %v", ev)
err := enc.Encode(ev) err := enc.Encode(ev)
if err != nil { if err != nil {
glog.Errorf("error encoding message %+v for result stream: %v", ev, err) glog.Errorf("error encoding message %+v for result stream: %v", ev, err)


@ -17,6 +17,7 @@ package api
import ( import (
"fmt" "fmt"
"net/http" "net/http"
"strconv"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/google/cadvisor/events" "github.com/google/cadvisor/events"
@ -31,8 +32,10 @@ const (
machineApi = "machine" machineApi = "machine"
dockerApi = "docker" dockerApi = "docker"
summaryApi = "summary" summaryApi = "summary"
statsApi = "stats"
specApi = "spec" specApi = "spec"
eventsApi = "events" eventsApi = "events"
storageApi = "storage"
) )
// Interface for a cAdvisor API version // Interface for a cAdvisor API version
@ -313,6 +316,22 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
} }
return writeResult(stats, w) return writeResult(stats, w)
case statsApi:
name := getContainerName(request)
sr, err := getStatsRequest(name, r)
if err != nil {
return err
}
glog.V(2).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, sr)
query := info.ContainerInfoRequest{
NumStats: sr.Count,
}
cont, err := m.GetContainerInfo(name, &query)
if err != nil {
return fmt.Errorf("failed to get container %q: %v", name, err)
}
contStats := convertStats(cont)
return writeResult(contStats, w)
case specApi: case specApi:
containerName := getContainerName(request) containerName := getContainerName(request)
glog.V(2).Infof("Api - Spec(%v)", containerName) glog.V(2).Infof("Api - Spec(%v)", containerName)
@ -322,6 +341,24 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
} }
specV2 := convertSpec(spec) specV2 := convertSpec(spec)
return writeResult(specV2, w) return writeResult(specV2, w)
case storageApi:
var err error
fi := []v2.FsInfo{}
label := r.URL.Query().Get("label")
if len(label) == 0 {
// Get all global filesystems info.
fi, err = m.GetFsInfo("")
if err != nil {
return err
}
} else {
// Get a specific label.
fi, err = m.GetFsInfo(label)
if err != nil {
return err
}
}
return writeResult(fi, w)
default: default:
return self.baseVersion.HandleRequest(requestType, request, m, w, r) return self.baseVersion.HandleRequest(requestType, request, m, w, r)
} }
@ -346,3 +383,59 @@ func convertSpec(specV1 info.ContainerSpec) v2.ContainerSpec {
} }
return specV2 return specV2
} }
func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
stats := []v2.ContainerStats{}
for _, val := range cont.Stats {
stat := v2.ContainerStats{
Timestamp: val.Timestamp,
HasCpu: cont.Spec.HasCpu,
HasMemory: cont.Spec.HasMemory,
HasNetwork: cont.Spec.HasNetwork,
HasFilesystem: cont.Spec.HasFilesystem,
HasDiskIo: cont.Spec.HasDiskIo,
}
if stat.HasCpu {
stat.Cpu = val.Cpu
}
if stat.HasMemory {
stat.Memory = val.Memory
}
if stat.HasNetwork {
// TODO(rjnagal): Return stats about all network interfaces.
stat.Network = append(stat.Network, val.Network)
}
if stat.HasFilesystem {
stat.Filesystem = val.Filesystem
}
if stat.HasDiskIo {
stat.DiskIo = val.DiskIo
}
// TODO(rjnagal): Handle load stats.
stats = append(stats, stat)
}
return stats
}
func getStatsRequest(id string, r *http.Request) (v2.StatsRequest, error) {
// fill in the defaults.
sr := v2.StatsRequest{
IdType: "name",
Count: 64,
Recursive: false,
}
idType := r.URL.Query().Get("type")
if len(idType) != 0 && idType != "name" {
return sr, fmt.Errorf("unknown 'type' %q for container name %q", idType, id)
}
count := r.URL.Query().Get("count")
if len(count) != 0 {
n, err := strconv.ParseUint(count, 10, 32)
if err != nil {
return sr, fmt.Errorf("failed to parse 'count' option: %v", count)
}
sr.Count = int(n)
}
// TODO(rjnagal): Add option to specify recursive.
return sr, nil
}
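
To make the new v2 `stats` and `storage` endpoints concrete, here is a minimal client-side sketch. The `/api/v2.0/` prefix, the host and port, and the exact JSON shape of the responses are assumptions inferred from the handlers above, not something this hunk pins down:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/google/cadvisor/info/v2"
)

func main() {
	// Last 10 samples for the root container, addressed by name (the only
	// id type the handler accepts so far).
	resp, err := http.Get("http://localhost:8080/api/v2.0/stats/?type=name&count=10")
	if err != nil {
		panic(err)
	}
	var stats []v2.ContainerStats
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Printf("got %d stat samples\n", len(stats))

	// Filesystem info for the partition carrying the "docker-images" label.
	resp, err = http.Get("http://localhost:8080/api/v2.0/storage?label=docker-images")
	if err != nil {
		panic(err)
	}
	var filesystems []v2.FsInfo
	if err := json.NewDecoder(resp.Body).Decode(&filesystems); err != nil {
		panic(err)
	}
	resp.Body.Close()
	for _, f := range filesystems {
		fmt.Printf("%s on %s: %d of %d bytes used (labels %v)\n",
			f.Device, f.Mountpoint, f.Usage, f.Capacity, f.Labels)
	}
}
```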


@ -1,54 +0,0 @@
# Example REST API Client
This is a Go client library for the cAdvisor REST API. You can use it like this:
```go
client, err := client.NewClient("http://192.168.59.103:8080/")
```
Replace the URL with the address of your actual cAdvisor REST endpoint.
### MachineInfo
```go
client.MachineInfo()
```
This method returns a cadvisor/info.MachineInfo struct with all the fields filled in. Here is an example return value:
```
(*info.MachineInfo)(0xc208022b10)({
NumCores: (int) 4,
MemoryCapacity: (int64) 2106028032,
Filesystems: ([]info.FsInfo) (len=1 cap=4) {
(info.FsInfo) {
Device: (string) (len=9) "/dev/sda1",
Capacity: (uint64) 19507089408
}
}
})
```
You can see the full specification of the [MachineInfo struct in the source](../info/container.go)
### ContainerInfo
Given a container name and a ContainerInfoRequest, ContainerInfo returns all information about the specified container. The ContainerInfoRequest struct has a single field, NumStats, which is the number of stat entries you want returned.
```go
request := info.ContainerInfoRequest{10}
sInfo, err := client.ContainerInfo("/docker/d9d3eb10179e6f93a...", &request)
```
Returns a [ContainerInfo struct](../info/container.go)
### SubcontainersInfo
Given a container name and a ContainerInfoRequest, SubcontainersInfo recursively returns all information about the container and every subcontainer within it. The ContainerInfoRequest struct has a single field, NumStats, which is the number of stat entries you want returned.
```go
request := info.ContainerInfoRequest{10}
sInfo, err := client.SubcontainersInfo("/docker", &request)
```
Returns a [ContainerInfo struct](../info/container.go) with the Subcontainers field populated.


@ -1,164 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO(cAdvisor): Package comment.
package client
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"path"
"strings"
info "github.com/google/cadvisor/info/v1"
)
// Client represents the base URL for a cAdvisor client.
type Client struct {
baseUrl string
}
// NewClient returns a new client with the specified base URL.
func NewClient(url string) (*Client, error) {
if !strings.HasSuffix(url, "/") {
url += "/"
}
return &Client{
baseUrl: fmt.Sprintf("%sapi/v1.2/", url),
}, nil
}
// MachineInfo returns the JSON machine information for this client.
// A non-nil error result indicates a problem with obtaining
// the JSON machine information data.
func (self *Client) MachineInfo() (minfo *info.MachineInfo, err error) {
u := self.machineInfoUrl()
ret := new(info.MachineInfo)
if err = self.httpGetJsonData(ret, nil, u, "machine info"); err != nil {
return
}
minfo = ret
return
}
// ContainerInfo returns the JSON container information for the specified
// container and request.
func (self *Client) ContainerInfo(name string, query *info.ContainerInfoRequest) (cinfo *info.ContainerInfo, err error) {
u := self.containerInfoUrl(name)
ret := new(info.ContainerInfo)
if err = self.httpGetJsonData(ret, query, u, fmt.Sprintf("container info for %q", name)); err != nil {
return
}
cinfo = ret
return
}
// Returns the information about all subcontainers (recursive) of the specified container (including itself).
func (self *Client) SubcontainersInfo(name string, query *info.ContainerInfoRequest) ([]info.ContainerInfo, error) {
var response []info.ContainerInfo
url := self.subcontainersInfoUrl(name)
err := self.httpGetJsonData(&response, query, url, fmt.Sprintf("subcontainers container info for %q", name))
if err != nil {
return []info.ContainerInfo{}, err
}
return response, nil
}
// Returns the JSON container information for the specified
// Docker container and request.
func (self *Client) DockerContainer(name string, query *info.ContainerInfoRequest) (cinfo info.ContainerInfo, err error) {
u := self.dockerInfoUrl(name)
ret := make(map[string]info.ContainerInfo)
if err = self.httpGetJsonData(&ret, query, u, fmt.Sprintf("Docker container info for %q", name)); err != nil {
return
}
if len(ret) != 1 {
err = fmt.Errorf("expected to only receive 1 Docker container: %+v", ret)
return
}
for _, cont := range ret {
cinfo = cont
}
return
}
// Returns the JSON container information for all Docker containers.
func (self *Client) AllDockerContainers(query *info.ContainerInfoRequest) (cinfo []info.ContainerInfo, err error) {
u := self.dockerInfoUrl("/")
ret := make(map[string]info.ContainerInfo)
if err = self.httpGetJsonData(&ret, query, u, "all Docker containers info"); err != nil {
return
}
cinfo = make([]info.ContainerInfo, 0, len(ret))
for _, cont := range ret {
cinfo = append(cinfo, cont)
}
return
}
func (self *Client) machineInfoUrl() string {
return self.baseUrl + path.Join("machine")
}
func (self *Client) containerInfoUrl(name string) string {
return self.baseUrl + path.Join("containers", name)
}
func (self *Client) subcontainersInfoUrl(name string) string {
return self.baseUrl + path.Join("subcontainers", name)
}
func (self *Client) dockerInfoUrl(name string) string {
return self.baseUrl + path.Join("docker", name)
}
func (self *Client) httpGetJsonData(data, postData interface{}, url, infoName string) error {
var resp *http.Response
var err error
if postData != nil {
data, err := json.Marshal(postData)
if err != nil {
return fmt.Errorf("unable to marshal data: %v", err)
}
resp, err = http.Post(url, "application/json", bytes.NewBuffer(data))
} else {
resp, err = http.Get(url)
}
if err != nil {
return fmt.Errorf("unable to get %q from %q: %v", infoName, url, err)
}
if resp == nil {
return fmt.Errorf("received empty response for %q from %q", infoName, url)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
err = fmt.Errorf("unable to read all %q from %q: %v", infoName, url, err)
return err
}
if resp.StatusCode != 200 {
return fmt.Errorf("request %q failed with error: %q", url, strings.TrimSpace(string(body)))
}
if err = json.Unmarshal(body, data); err != nil {
err = fmt.Errorf("unable to unmarshal %q (Body: %q) from %q with error: %v", infoName, string(body), url, err)
return err
}
return nil
}


@ -1,190 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"path"
"reflect"
"strings"
"testing"
"time"
info "github.com/google/cadvisor/info/v1"
itest "github.com/google/cadvisor/info/v1/test"
"github.com/kr/pretty"
)
func testGetJsonData(
expected interface{},
f func() (interface{}, error),
) error {
reply, err := f()
if err != nil {
return fmt.Errorf("unable to retrieve data: %v", err)
}
if !reflect.DeepEqual(reply, expected) {
return pretty.Errorf("retrieved wrong data: %# v != %# v", reply, expected)
}
return nil
}
func cadvisorTestClient(path string, expectedPostObj *info.ContainerInfoRequest, replyObj interface{}, t *testing.T) (*Client, *httptest.Server, error) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == path {
if expectedPostObj != nil {
expectedPostObjEmpty := new(info.ContainerInfoRequest)
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(expectedPostObjEmpty); err != nil {
t.Errorf("Received invalid object: %v", err)
}
if expectedPostObj.NumStats != expectedPostObjEmpty.NumStats ||
expectedPostObj.Start.Unix() != expectedPostObjEmpty.Start.Unix() ||
expectedPostObj.End.Unix() != expectedPostObjEmpty.End.Unix() {
t.Errorf("Received unexpected object: %+v, expected: %+v", expectedPostObjEmpty, expectedPostObj)
}
}
encoder := json.NewEncoder(w)
encoder.Encode(replyObj)
} else if r.URL.Path == "/api/v1.2/machine" {
fmt.Fprint(w, `{"num_cores":8,"memory_capacity":31625871360, "disk_map":["8:0":{"name":"sda","major":8,"minor":0,"size":10737418240}]}`)
} else {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintf(w, "Page not found.")
}
}))
client, err := NewClient(ts.URL)
if err != nil {
ts.Close()
return nil, nil, err
}
return client, ts, err
}
// TestGetMachineInfo performs one test to check if MachineInfo()
// in a cAdvisor client returns the correct result.
func TestGetMachineinfo(t *testing.T) {
minfo := &info.MachineInfo{
NumCores: 8,
MemoryCapacity: 31625871360,
DiskMap: map[string]info.DiskInfo{
"8:0": {
Name: "sda",
Major: 8,
Minor: 0,
Size: 10737418240,
},
},
}
client, server, err := cadvisorTestClient("/api/v1.2/machine", nil, minfo, t)
if err != nil {
t.Fatalf("unable to get a client %v", err)
}
defer server.Close()
returned, err := client.MachineInfo()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(returned, minfo) {
t.Fatalf("received unexpected machine info")
}
}
// TestGetContainerInfo generates a random container information object
// and then checks that ContainerInfo returns the expected result.
func TestGetContainerInfo(t *testing.T) {
query := &info.ContainerInfoRequest{
NumStats: 3,
}
containerName := "/some/container"
cinfo := itest.GenerateRandomContainerInfo(containerName, 4, query, 1*time.Second)
client, server, err := cadvisorTestClient(fmt.Sprintf("/api/v1.2/containers%v", containerName), query, cinfo, t)
if err != nil {
t.Fatalf("unable to get a client %v", err)
}
defer server.Close()
returned, err := client.ContainerInfo(containerName, query)
if err != nil {
t.Fatal(err)
}
if !returned.Eq(cinfo) {
t.Error("received unexpected ContainerInfo")
}
}
// Test a request failing
func TestRequestFails(t *testing.T) {
errorText := "there was an error"
// Setup a server that simply fails.
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, errorText, 500)
}))
client, err := NewClient(ts.URL)
if err != nil {
ts.Close()
t.Fatal(err)
}
defer ts.Close()
_, err = client.ContainerInfo("/", &info.ContainerInfoRequest{NumStats: 3})
if err == nil {
t.Fatalf("Expected non-nil error")
}
expectedError := fmt.Sprintf("request failed with error: %q", errorText)
if strings.Contains(err.Error(), expectedError) {
t.Fatalf("Expected error %q but received %q", expectedError, err)
}
}
func TestGetSubcontainersInfo(t *testing.T) {
query := &info.ContainerInfoRequest{
NumStats: 3,
}
containerName := "/some/container"
cinfo := itest.GenerateRandomContainerInfo(containerName, 4, query, 1*time.Second)
cinfo1 := itest.GenerateRandomContainerInfo(path.Join(containerName, "sub1"), 4, query, 1*time.Second)
cinfo2 := itest.GenerateRandomContainerInfo(path.Join(containerName, "sub2"), 4, query, 1*time.Second)
response := []info.ContainerInfo{
*cinfo,
*cinfo1,
*cinfo2,
}
client, server, err := cadvisorTestClient(fmt.Sprintf("/api/v1.2/subcontainers%v", containerName), query, response, t)
if err != nil {
t.Fatalf("unable to get a client %v", err)
}
defer server.Close()
returned, err := client.SubcontainersInfo(containerName, query)
if err != nil {
t.Fatal(err)
}
if len(returned) != 3 {
t.Errorf("unexpected number of results: got %d, expected 3", len(returned))
}
if !returned[0].Eq(cinfo) {
t.Error("received unexpected ContainerInfo")
}
if !returned[1].Eq(cinfo1) {
t.Error("received unexpected ContainerInfo")
}
if !returned[2].Eq(cinfo2) {
t.Error("received unexpected ContainerInfo")
}
}


@ -28,6 +28,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"github.com/google/cadvisor/container" "github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/libcontainer" "github.com/google/cadvisor/container/libcontainer"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1" info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/utils" "github.com/google/cadvisor/utils"
) )
@ -62,6 +63,10 @@ func UseSystemd() bool {
return useSystemd return useSystemd
} }
func RootDir() string {
return *dockerRootDir
}
type dockerFactory struct { type dockerFactory struct {
machineInfoFactory info.MachineInfoFactory machineInfoFactory info.MachineInfoFactory
@ -72,6 +77,9 @@ type dockerFactory struct {
// Information about the mounted cgroup subsystems. // Information about the mounted cgroup subsystems.
cgroupSubsystems libcontainer.CgroupSubsystems cgroupSubsystems libcontainer.CgroupSubsystems
// Information about mounted filesystems.
fsInfo fs.FsInfo
} }
func (self *dockerFactory) String() string { func (self *dockerFactory) String() string {
@ -87,6 +95,7 @@ func (self *dockerFactory) NewContainerHandler(name string) (handler container.C
client, client,
name, name,
self.machineInfoFactory, self.machineInfoFactory,
self.fsInfo,
*dockerRootDir, *dockerRootDir,
self.usesAufsDriver, self.usesAufsDriver,
&self.cgroupSubsystems, &self.cgroupSubsystems,
@ -151,7 +160,7 @@ func parseDockerVersion(full_version_string string) ([]int, error) {
} }
// Register root container before running this function! // Register root container before running this function!
func Register(factory info.MachineInfoFactory) error { func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
client, err := docker.NewClient(*ArgDockerEndpoint) client, err := docker.NewClient(*ArgDockerEndpoint)
if err != nil { if err != nil {
return fmt.Errorf("unable to communicate with docker daemon: %v", err) return fmt.Errorf("unable to communicate with docker daemon: %v", err)
@ -213,6 +222,7 @@ func Register(factory info.MachineInfoFactory) error {
client: client, client: client,
usesAufsDriver: usesAufsDriver, usesAufsDriver: usesAufsDriver,
cgroupSubsystems: cgroupSubsystems, cgroupSubsystems: cgroupSubsystems,
fsInfo: fsInfo,
} }
container.RegisterContainerHandlerFactory(f) container.RegisterContainerHandlerFactory(f)
return nil return nil


@ -82,16 +82,11 @@ func newDockerContainerHandler(
client *docker.Client, client *docker.Client,
name string, name string,
machineInfoFactory info.MachineInfoFactory, machineInfoFactory info.MachineInfoFactory,
fsInfo fs.FsInfo,
dockerRootDir string, dockerRootDir string,
usesAufsDriver bool, usesAufsDriver bool,
cgroupSubsystems *containerLibcontainer.CgroupSubsystems, cgroupSubsystems *containerLibcontainer.CgroupSubsystems,
) (container.ContainerHandler, error) { ) (container.ContainerHandler, error) {
// TODO(vmarmol): Get from factory.
fsInfo, err := fs.NewFsInfo()
if err != nil {
return nil, err
}
// Create the cgroup paths. // Create the cgroup paths.
cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
for key, val := range cgroupSubsystems.MountPoints { for key, val := range cgroupSubsystems.MountPoints {


@ -48,6 +48,14 @@ func RegisterContainerHandlerFactory(factory ContainerHandlerFactory) {
factories = append(factories, factory) factories = append(factories, factory)
} }
// Returns whether there are any container handler factories registered.
func HasFactories() bool {
factoriesLock.Lock()
defer factoriesLock.Unlock()
return len(factories) != 0
}
// Create a new ContainerHandler for the specified container. // Create a new ContainerHandler for the specified container.
func NewContainerHandler(name string) (ContainerHandler, error) { func NewContainerHandler(name string) (ContainerHandler, error) {
factoriesLock.RLock() factoriesLock.RLock()


@ -20,6 +20,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"github.com/google/cadvisor/container" "github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/libcontainer" "github.com/google/cadvisor/container/libcontainer"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1" info "github.com/google/cadvisor/info/v1"
) )
@ -29,6 +30,9 @@ type rawFactory struct {
// Information about the cgroup subsystems. // Information about the cgroup subsystems.
cgroupSubsystems *libcontainer.CgroupSubsystems cgroupSubsystems *libcontainer.CgroupSubsystems
// Information about mounted filesystems.
fsInfo fs.FsInfo
} }
func (self *rawFactory) String() string { func (self *rawFactory) String() string {
@ -36,7 +40,7 @@ func (self *rawFactory) String() string {
} }
func (self *rawFactory) NewContainerHandler(name string) (container.ContainerHandler, error) { func (self *rawFactory) NewContainerHandler(name string) (container.ContainerHandler, error) {
return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory) return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo)
} }
// The raw factory can handle any container. // The raw factory can handle any container.
@ -44,7 +48,7 @@ func (self *rawFactory) CanHandle(name string) (bool, error) {
return true, nil return true, nil
} }
func Register(machineInfoFactory info.MachineInfoFactory) error { func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
cgroupSubsystems, err := libcontainer.GetCgroupSubsystems() cgroupSubsystems, err := libcontainer.GetCgroupSubsystems()
if err != nil { if err != nil {
return fmt.Errorf("failed to get cgroup subsystems: %v", err) return fmt.Errorf("failed to get cgroup subsystems: %v", err)
@ -56,6 +60,7 @@ func Register(machineInfoFactory info.MachineInfoFactory) error {
glog.Infof("Registering Raw factory") glog.Infof("Registering Raw factory")
factory := &rawFactory{ factory := &rawFactory{
machineInfoFactory: machineInfoFactory, machineInfoFactory: machineInfoFactory,
fsInfo: fsInfo,
cgroupSubsystems: &cgroupSubsystems, cgroupSubsystems: &cgroupSubsystems,
} }
container.RegisterContainerHandlerFactory(factory) container.RegisterContainerHandlerFactory(factory)


@ -71,18 +71,13 @@ type rawContainerHandler struct {
externalMounts []mount externalMounts []mount
} }
func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory) (container.ContainerHandler, error) { func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo) (container.ContainerHandler, error) {
// Create the cgroup paths. // Create the cgroup paths.
cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
for key, val := range cgroupSubsystems.MountPoints { for key, val := range cgroupSubsystems.MountPoints {
cgroupPaths[key] = path.Join(val, name) cgroupPaths[key] = path.Join(val, name)
} }
// TODO(vmarmol): Get from factory.
fsInfo, err := fs.NewFsInfo()
if err != nil {
return nil, err
}
cHints, err := getContainerHintsFromFile(*argContainerHints) cHints, err := getContainerHintsFromFile(*argContainerHints)
if err != nil { if err != nil {
return nil, err return nil, err


@ -29,6 +29,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path" "path"
"path/filepath"
"regexp" "regexp"
"strconv" "strconv"
"strings" "strings"
@ -41,6 +42,11 @@ import (
var partitionRegex = regexp.MustCompile("^(:?(:?s|xv)d[a-z]+\\d*|dm-\\d+)$") var partitionRegex = regexp.MustCompile("^(:?(:?s|xv)d[a-z]+\\d*|dm-\\d+)$")
const (
LabelSystemRoot = "root"
LabelDockerImages = "docker-images"
)
type partition struct { type partition struct {
mountpoint string mountpoint string
major uint major uint
@ -48,15 +54,26 @@ type partition struct {
} }
type RealFsInfo struct { type RealFsInfo struct {
// Map from block device path to partition information.
partitions map[string]partition partitions map[string]partition
// Map from label to block device path.
// Labels are intent-specific tags that are auto-detected.
labels map[string]string
} }
func NewFsInfo() (FsInfo, error) { type Context struct {
// docker root directory.
DockerRoot string
}
func NewFsInfo(context Context) (FsInfo, error) {
mounts, err := mount.GetMounts() mounts, err := mount.GetMounts()
if err != nil { if err != nil {
return nil, err return nil, err
} }
partitions := make(map[string]partition, 0) partitions := make(map[string]partition, 0)
fsInfo := &RealFsInfo{}
fsInfo.labels = make(map[string]string, 0)
for _, mount := range mounts { for _, mount := range mounts {
if !strings.HasPrefix(mount.Fstype, "ext") && mount.Fstype != "btrfs" { if !strings.HasPrefix(mount.Fstype, "ext") && mount.Fstype != "btrfs" {
continue continue
@ -68,7 +85,84 @@ func NewFsInfo() (FsInfo, error) {
partitions[mount.Source] = partition{mount.Mountpoint, uint(mount.Major), uint(mount.Minor)} partitions[mount.Source] = partition{mount.Mountpoint, uint(mount.Major), uint(mount.Minor)}
} }
glog.Infof("Filesystem partitions: %+v", partitions) glog.Infof("Filesystem partitions: %+v", partitions)
return &RealFsInfo{partitions}, nil fsInfo.partitions = partitions
fsInfo.addLabels(context)
return fsInfo, nil
}
func (self *RealFsInfo) addLabels(context Context) {
dockerPaths := getDockerImagePaths(context)
for src, p := range self.partitions {
if p.mountpoint == "/" {
if _, ok := self.labels[LabelSystemRoot]; !ok {
self.labels[LabelSystemRoot] = src
}
}
self.updateDockerImagesPath(src, p.mountpoint, dockerPaths)
// TODO(rjnagal): Add label for docker devicemapper pool.
}
}
// Generate a list of possible mount points for docker image management from the docker root directory.
// Right now, we look for the directory of each supported graph driver, but we could do better by parsing
// some of the context from `docker info`.
func getDockerImagePaths(context Context) []string {
// TODO(rjnagal): Detect docker root and graphdriver directories from docker info.
dockerRoot := context.DockerRoot
dockerImagePaths := []string{}
for _, dir := range []string{"devicemapper", "btrfs", "aufs"} {
dockerImagePaths = append(dockerImagePaths, path.Join(dockerRoot, dir))
}
for dockerRoot != "/" && dockerRoot != "." {
dockerImagePaths = append(dockerImagePaths, dockerRoot)
dockerRoot = filepath.Dir(dockerRoot)
}
dockerImagePaths = append(dockerImagePaths, "/")
return dockerImagePaths
}
// This method compares the mountpoint with possible docker image mount points. If a match is found,
// the docker-images label is added to the partition.
func (self *RealFsInfo) updateDockerImagesPath(source string, mountpoint string, dockerImagePaths []string) {
for _, v := range dockerImagePaths {
if v == mountpoint {
if i, ok := self.labels[LabelDockerImages]; ok {
// pick the innermost mountpoint.
mnt := self.partitions[i].mountpoint
if len(mnt) < len(mountpoint) {
self.labels[LabelDockerImages] = source
}
} else {
self.labels[LabelDockerImages] = source
}
}
}
}
func (self *RealFsInfo) GetDeviceForLabel(label string) (string, error) {
dev, ok := self.labels[label]
if !ok {
return "", fmt.Errorf("non-existent label %q", label)
}
return dev, nil
}
func (self *RealFsInfo) GetLabelsForDevice(device string) ([]string, error) {
labels := []string{}
for label, dev := range self.labels {
if dev == device {
labels = append(labels, label)
}
}
return labels, nil
}
func (self *RealFsInfo) GetMountpointForDevice(dev string) (string, error) {
p, ok := self.partitions[dev]
if !ok {
return "", fmt.Errorf("no partition info for device %q", dev)
}
return p.mountpoint, nil
} }
func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) { func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) {
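
A short sketch of how the new label plumbing might be used on a Linux host. The DockerRoot value is an assumption; inside cAdvisor it comes from docker.RootDir():

```go
package main

import (
	"fmt"

	"github.com/google/cadvisor/fs"
)

func main() {
	// With DockerRoot = /var/lib/docker the candidate image mount points are
	// /var/lib/docker/{devicemapper,btrfs,aufs}, /var/lib/docker, /var/lib,
	// /var and /; the innermost matching mountpoint wins the label.
	fsInfo, err := fs.NewFsInfo(fs.Context{DockerRoot: "/var/lib/docker"})
	if err != nil {
		panic(err)
	}

	dev, err := fsInfo.GetDeviceForLabel(fs.LabelDockerImages)
	if err != nil {
		panic(err) // no partition matched, e.g. on a non-Linux host
	}
	mountpoint, err := fsInfo.GetMountpointForDevice(dev)
	if err != nil {
		panic(err)
	}
	fmt.Printf("docker images live on %s, mounted at %s\n", dev, mountpoint)
}
```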


@ -53,4 +53,13 @@ type FsInfo interface {
// Returns the block device info of the filesystem on which 'dir' resides. // Returns the block device info of the filesystem on which 'dir' resides.
GetDirFsDevice(dir string) (*DeviceInfo, error) GetDirFsDevice(dir string) (*DeviceInfo, error)
// Returns the device name associated with a particular label.
GetDeviceForLabel(label string) (string, error)
// Returns all labels associated with a particular device name.
GetLabelsForDevice(device string) ([]string, error)
// Returns the mountpoint associated with a particular device.
GetMountpointForDevice(device string) (string, error)
} }


@ -24,12 +24,14 @@ import (
"github.com/google/cadvisor/healthz" "github.com/google/cadvisor/healthz"
httpMux "github.com/google/cadvisor/http/mux" httpMux "github.com/google/cadvisor/http/mux"
"github.com/google/cadvisor/manager" "github.com/google/cadvisor/manager"
"github.com/google/cadvisor/metrics"
"github.com/google/cadvisor/pages" "github.com/google/cadvisor/pages"
"github.com/google/cadvisor/pages/static" "github.com/google/cadvisor/pages/static"
"github.com/google/cadvisor/validate" "github.com/google/cadvisor/validate"
"github.com/prometheus/client_golang/prometheus"
) )
func RegisterHandlers(mux httpMux.Mux, containerManager manager.Manager, httpAuthFile, httpAuthRealm, httpDigestFile, httpDigestRealm string) error { func RegisterHandlers(mux httpMux.Mux, containerManager manager.Manager, httpAuthFile, httpAuthRealm, httpDigestFile, httpDigestRealm, prometheusEndpoint string) error {
// Basic health handler. // Basic health handler.
if err := healthz.RegisterHandler(mux); err != nil { if err := healthz.RegisterHandler(mux); err != nil {
return fmt.Errorf("failed to register healthz handler: %s", err) return fmt.Errorf("failed to register healthz handler: %s", err)
@ -83,6 +85,10 @@ func RegisterHandlers(mux httpMux.Mux, containerManager manager.Manager, httpAut
} }
} }
collector := metrics.NewPrometheusCollector(containerManager)
prometheus.MustRegister(collector)
http.Handle(prometheusEndpoint, prometheus.Handler())
return nil return nil
} }


@ -228,7 +228,7 @@ type LoadStats struct {
NrStopped uint64 `json:"nr_stopped"` NrStopped uint64 `json:"nr_stopped"`
// Number of tasks in uninterruptible state // Number of tasks in uninterruptible state
NrUinterruptible uint64 `json:"nr_uninterruptible"` NrUninterruptible uint64 `json:"nr_uninterruptible"`
// Number of tasks waiting on IO // Number of tasks waiting on IO
NrIoWait uint64 `json:"nr_io_wait"` NrIoWait uint64 `json:"nr_io_wait"`


@ -16,6 +16,10 @@ package v2
import ( import (
"time" "time"
// TODO(rjnagal): Remove dependency after moving all stats structs from v1.
// using v1 now for easy conversion.
"github.com/google/cadvisor/info/v1"
) )
type CpuSpec struct { type CpuSpec struct {
@ -54,6 +58,29 @@ type ContainerSpec struct {
Memory MemorySpec `json:"memory,omitempty"` Memory MemorySpec `json:"memory,omitempty"`
} }
type ContainerStats struct {
// The time of this stat point.
Timestamp time.Time `json:"timestamp"`
// CPU statistics
HasCpu bool `json:"has_cpu"`
Cpu v1.CpuStats `json:"cpu,omitempty"`
// Disk IO statistics
HasDiskIo bool `json:"has_diskio"`
DiskIo v1.DiskIoStats `json:"diskio,omitempty"`
// Memory statistics
HasMemory bool `json:"has_memory"`
Memory v1.MemoryStats `json:"memory,omitempty"`
// Network statistics
HasNetwork bool `json:"has_network"`
Network []v1.NetworkStats `json:"network,omitempty"`
// Filesystem statistics
HasFilesystem bool `json:"has_filesystem"`
Filesystem []v1.FsStats `json:"filesystem,omitempty"`
// Task load statistics
HasLoad bool `json:"has_load"`
Load v1.LoadStats `json:"load_stats,omitempty"`
}
type Percentiles struct { type Percentiles struct {
// Indicates whether the stats are present or not. // Indicates whether the stats are present or not.
// If true, values below do not have any data. // If true, values below do not have any data.
@ -97,3 +124,29 @@ type DerivedStats struct {
// Percentile in last day. // Percentile in last day.
DayUsage Usage `json:"day_usage"` DayUsage Usage `json:"day_usage"`
} }
type FsInfo struct {
// The block device name associated with the filesystem.
Device string `json:"device"`
// Path where the filesystem is mounted.
Mountpoint string `json:"mountpoint"`
// Total capacity of the filesystem in bytes.
Capacity uint64 `json:"capacity"`
// Number of bytes used on this filesystem.
Usage uint64 `json:"usage"`
// Labels associated with this filesystem.
Labels []string `json:"labels"`
}
type StatsRequest struct {
// Type of container identifier specified - "name", "dockerid", "dockeralias".
IdType string `json:"type"`
// Number of stats to return
Count int `json:"count"`
// Whether to include stats for child subcontainers.
Recursive bool `json:"recursive"`
}
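
A sketch of how a consumer might build and serialize the new v2.ContainerStats type, mirroring what convertStats does above. The nested v1 field names (Cpu.Usage.*, Memory.*) are taken from how the Prometheus collector in this commit reads them, and the values are made up:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"time"

	"github.com/google/cadvisor/info/v2"
)

func main() {
	// Mirror convertStats: set a section's Has* flag and only then fill the
	// section itself; consumers should check the flag before trusting data.
	stat := v2.ContainerStats{
		Timestamp: time.Now(),
		HasCpu:    true,
		HasMemory: true,
	}
	stat.Cpu.Usage.Total = 2000000 // nanoseconds, made up
	stat.Cpu.Usage.User = 1500000
	stat.Memory.Usage = 64 << 20 // bytes, made up
	stat.Memory.WorkingSet = 32 << 20

	if stat.HasCpu {
		fmt.Println("total cpu ns:", stat.Cpu.Usage.Total)
	}

	// The new /stats handler returns a JSON array of these.
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode([]v2.ContainerStats{stat})
}
```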


@ -223,7 +223,7 @@ func getMachineID() string {
return "" return ""
} }
func getMachineInfo(sysFs sysfs.SysFs) (*info.MachineInfo, error) { func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, error) {
cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo")
clockSpeed, err := getClockSpeed(cpuinfo) clockSpeed, err := getClockSpeed(cpuinfo)
if err != nil { if err != nil {
@ -241,34 +241,29 @@ func getMachineInfo(sysFs sysfs.SysFs) (*info.MachineInfo, error) {
return nil, err return nil, err
} }
fsInfo, err := fs.NewFsInfo()
if err != nil {
return nil, err
}
filesystems, err := fsInfo.GetGlobalFsInfo() filesystems, err := fsInfo.GetGlobalFsInfo()
if err != nil { if err != nil {
return nil, err glog.Errorf("Failed to get global filesystem information: %v", err)
} }
diskMap, err := sysinfo.GetBlockDeviceInfo(sysFs) diskMap, err := sysinfo.GetBlockDeviceInfo(sysFs)
if err != nil { if err != nil {
return nil, err glog.Errorf("Failed to get disk map: %v", err)
} }
netDevices, err := sysinfo.GetNetworkDevices(sysFs) netDevices, err := sysinfo.GetNetworkDevices(sysFs)
if err != nil { if err != nil {
return nil, err glog.Errorf("Failed to get network devices: %v", err)
} }
topology, numCores, err := getTopology(sysFs, string(cpuinfo)) topology, numCores, err := getTopology(sysFs, string(cpuinfo))
if err != nil { if err != nil {
return nil, err glog.Errorf("Failed to get topology information: %v", err)
} }
systemUUID, err := sysinfo.GetSystemUUID(sysFs) systemUUID, err := sysinfo.GetSystemUUID(sysFs)
if err != nil { if err != nil {
glog.Errorf("Failed to get system UUID: %v", err) glog.Errorf("Failed to get system UUID: %v", err)
systemUUID = ""
} }
machineInfo := &info.MachineInfo{ machineInfo := &info.MachineInfo{


@ -30,6 +30,7 @@ import (
"github.com/google/cadvisor/container/docker" "github.com/google/cadvisor/container/docker"
"github.com/google/cadvisor/container/raw" "github.com/google/cadvisor/container/raw"
"github.com/google/cadvisor/events" "github.com/google/cadvisor/events"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1" info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/info/v2" "github.com/google/cadvisor/info/v2"
"github.com/google/cadvisor/storage/memory" "github.com/google/cadvisor/storage/memory"
@ -74,6 +75,10 @@ type Manager interface {
// Get version information about different components we depend on. // Get version information about different components we depend on.
GetVersionInfo() (*info.VersionInfo, error) GetVersionInfo() (*info.VersionInfo, error)
// Get filesystem information for a given label.
// Returns information for all global filesystems if label is empty.
GetFsInfo(label string) ([]v2.FsInfo, error)
// Get events streamed through passedChannel that fit the request. // Get events streamed through passedChannel that fit the request.
WatchForEvents(request *events.Request, passedChannel chan *events.Event) error WatchForEvents(request *events.Request, passedChannel chan *events.Event) error
@ -94,15 +99,21 @@ func New(memoryStorage *memory.InMemoryStorage, sysfs sysfs.SysFs) (Manager, err
} }
glog.Infof("cAdvisor running in container: %q", selfContainer) glog.Infof("cAdvisor running in container: %q", selfContainer)
context := fs.Context{DockerRoot: docker.RootDir()}
fsInfo, err := fs.NewFsInfo(context)
if err != nil {
return nil, err
}
newManager := &manager{ newManager := &manager{
containers: make(map[namespacedContainerName]*containerData), containers: make(map[namespacedContainerName]*containerData),
quitChannels: make([]chan error, 0, 2), quitChannels: make([]chan error, 0, 2),
memoryStorage: memoryStorage, memoryStorage: memoryStorage,
fsInfo: fsInfo,
cadvisorContainer: selfContainer, cadvisorContainer: selfContainer,
startupTime: time.Now(), startupTime: time.Now(),
} }
machineInfo, err := getMachineInfo(sysfs) machineInfo, err := getMachineInfo(sysfs, fsInfo)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -119,13 +130,13 @@ func New(memoryStorage *memory.InMemoryStorage, sysfs sysfs.SysFs) (Manager, err
newManager.eventHandler = events.NewEventManager() newManager.eventHandler = events.NewEventManager()
// Register Docker container factory. // Register Docker container factory.
err = docker.Register(newManager) err = docker.Register(newManager, fsInfo)
if err != nil { if err != nil {
glog.Errorf("Docker container factory registration failed: %v.", err) glog.Errorf("Docker container factory registration failed: %v.", err)
} }
// Register the raw driver. // Register the raw driver.
err = raw.Register(newManager) err = raw.Register(newManager, fsInfo)
if err != nil { if err != nil {
glog.Errorf("Registration of the raw container factory failed: %v", err) glog.Errorf("Registration of the raw container factory failed: %v", err)
} }
@ -146,6 +157,7 @@ type manager struct {
containers map[namespacedContainerName]*containerData containers map[namespacedContainerName]*containerData
containersLock sync.RWMutex containersLock sync.RWMutex
memoryStorage *memory.InMemoryStorage memoryStorage *memory.InMemoryStorage
fsInfo fs.FsInfo
machineInfo info.MachineInfo machineInfo info.MachineInfo
versionInfo info.VersionInfo versionInfo info.VersionInfo
quitChannels []chan error quitChannels []chan error
@ -175,8 +187,19 @@ func (self *manager) Start() error {
} }
} }
// Watch for OOMs.
err := self.watchForNewOoms()
if err != nil {
glog.Errorf("Failed to start OOM watcher, will not get OOM events: %v", err)
}
// If there are no factories, don't start any housekeeping and serve the information we do have.
if !container.HasFactories() {
return nil
}
// Create root and then recover all containers. // Create root and then recover all containers.
err := self.createContainer("/") err = self.createContainer("/")
if err != nil { if err != nil {
return err return err
} }
@ -194,10 +217,6 @@ func (self *manager) Start() error {
return err return err
} }
self.quitChannels = append(self.quitChannels, quitWatcher) self.quitChannels = append(self.quitChannels, quitWatcher)
err = self.watchForNewOoms()
if err != nil {
glog.Errorf("Failed to start OOM watcher, will not get OOM events: %v", err)
}
// Look for new containers in the main housekeeping thread. // Look for new containers in the main housekeeping thread.
quitGlobalHousekeeping := make(chan error) quitGlobalHousekeeping := make(chan error)
@ -439,6 +458,45 @@ func (self *manager) GetContainerDerivedStats(containerName string) (v2.DerivedS
return cont.DerivedStats() return cont.DerivedStats()
} }
func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
var empty time.Time
// Get latest data from filesystems hanging off root container.
stats, err := self.memoryStorage.RecentStats("/", empty, empty, 1)
if err != nil {
return nil, err
}
dev := ""
if len(label) != 0 {
dev, err = self.fsInfo.GetDeviceForLabel(label)
if err != nil {
return nil, err
}
}
fsInfo := []v2.FsInfo{}
for _, fs := range stats[0].Filesystem {
if len(label) != 0 && fs.Device != dev {
continue
}
mountpoint, err := self.fsInfo.GetMountpointForDevice(fs.Device)
if err != nil {
return nil, err
}
labels, err := self.fsInfo.GetLabelsForDevice(fs.Device)
if err != nil {
return nil, err
}
fi := v2.FsInfo{
Device: fs.Device,
Mountpoint: mountpoint,
Capacity: fs.Limit,
Usage: fs.Usage,
Labels: labels,
}
fsInfo = append(fsInfo, fi)
}
return fsInfo, nil
}
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) { func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
// Copy and return the MachineInfo. // Copy and return the MachineInfo.
return &m.machineInfo, nil return &m.machineInfo, nil
@ -695,6 +753,7 @@ func (self *manager) watchForNewContainers(quit chan error) error {
} }
func (self *manager) watchForNewOoms() error { func (self *manager) watchForNewOoms() error {
glog.Infof("Started watching for new ooms in manager")
outStream := make(chan *oomparser.OomInstance, 10) outStream := make(chan *oomparser.OomInstance, 10)
oomLog, err := oomparser.New() oomLog, err := oomparser.New()
if err != nil { if err != nil {


@ -84,3 +84,8 @@ func (c *ManagerMock) GetVersionInfo() (*info.VersionInfo, error) {
args := c.Called() args := c.Called()
return args.Get(0).(*info.VersionInfo), args.Error(1) return args.Get(0).(*info.VersionInfo), args.Error(1)
} }
func (c *ManagerMock) GetFsInfo() ([]v2.FsInfo, error) {
args := c.Called()
return args.Get(0).([]v2.FsInfo), args.Error(1)
}


@ -0,0 +1,366 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
"fmt"
"time"
"github.com/golang/glog"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager"
"github.com/prometheus/client_golang/prometheus"
)
type prometheusMetric struct {
valueType prometheus.ValueType
value float64
labels []string
}
// PrometheusCollector implements prometheus.Collector.
type PrometheusCollector struct {
manager manager.Manager
errors prometheus.Gauge
lastSeen *prometheus.Desc
cpuUsageUserSeconds *prometheus.Desc
cpuUsageSystemSeconds *prometheus.Desc
cpuUsageSecondsPerCPU *prometheus.Desc
memoryUsageBytes *prometheus.Desc
memoryWorkingSet *prometheus.Desc
memoryFailures *prometheus.Desc
fsLimit *prometheus.Desc
fsUsage *prometheus.Desc
fsReads *prometheus.Desc
fsReadsSectors *prometheus.Desc
fsReadsMerged *prometheus.Desc
fsReadTime *prometheus.Desc
fsWrites *prometheus.Desc
fsWritesSectors *prometheus.Desc
fsWritesMerged *prometheus.Desc
fsWriteTime *prometheus.Desc
fsIoInProgress *prometheus.Desc
fsIoTime *prometheus.Desc
fsWeightedIoTime *prometheus.Desc
networkRxBytes *prometheus.Desc
networkRxPackets *prometheus.Desc
networkRxErrors *prometheus.Desc
networkRxDropped *prometheus.Desc
networkTxBytes *prometheus.Desc
networkTxPackets *prometheus.Desc
networkTxErrors *prometheus.Desc
networkTxDropped *prometheus.Desc
tasks *prometheus.Desc
descs []*prometheus.Desc
}
// NewPrometheusCollector returns a new PrometheusCollector.
func NewPrometheusCollector(manager manager.Manager) *PrometheusCollector {
c := &PrometheusCollector{
manager: manager,
errors: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "container",
Name: "scrape_error",
Help: "1 if there was an error while getting container metrics, 0 otherwise",
}),
lastSeen: prometheus.NewDesc(
"container_last_seen",
"Last time a container was seen by the exporter",
[]string{"name", "id"},
nil),
cpuUsageUserSeconds: prometheus.NewDesc(
"container_cpu_user_seconds_total",
"Cumulative user cpu time consumed in seconds.",
[]string{"name", "id"},
nil),
cpuUsageSystemSeconds: prometheus.NewDesc(
"container_cpu_system_seconds_total",
"Cumulative system cpu time consumed in seconds.",
[]string{"name", "id"},
nil),
cpuUsageSecondsPerCPU: prometheus.NewDesc(
"container_cpu_usage_seconds_total",
"Cumulative cpu time consumed per cpu in seconds.",
[]string{"name", "id", "cpu"},
nil),
memoryUsageBytes: prometheus.NewDesc(
"container_memory_usage_bytes",
"Current memory usage in bytes.",
[]string{"name", "id"},
nil),
memoryWorkingSet: prometheus.NewDesc(
"container_memory_working_set_bytes",
"Current working set in bytes.",
[]string{"name", "id"},
nil),
memoryFailures: prometheus.NewDesc(
"container_memory_failures_total",
"Cumulative count of memory allocation failures.",
[]string{"type", "scope", "name", "id"},
nil),
fsLimit: prometheus.NewDesc(
"container_fs_limit_bytes",
"Number of bytes that can be consumed by the container on this filesystem.",
[]string{"name", "id", "device"},
nil),
fsUsage: prometheus.NewDesc(
"container_fs_usage_bytes",
"Number of bytes that are consumed by the container on this filesystem.",
[]string{"name", "id", "device"},
nil),
fsReads: prometheus.NewDesc(
"container_fs_reads_total",
"Cumulative count of reads completed",
[]string{"name", "id", "device"},
nil),
fsReadsSectors: prometheus.NewDesc(
"container_fs_sector_reads_total",
"Cumulative count of sector reads completed",
[]string{"name", "id", "device"},
nil),
fsReadsMerged: prometheus.NewDesc(
"container_fs_reads_merged_total",
"Cumulative count of reads merged",
[]string{"name", "id", "device"},
nil),
fsReadTime: prometheus.NewDesc(
"container_fs_read_seconds_total",
"Cumulative count of seconds spent reading",
[]string{"name", "id", "device"},
nil),
fsWrites: prometheus.NewDesc(
"container_fs_writes_total",
"Cumulative count of writes completed",
[]string{"name", "id", "device"},
nil),
fsWritesSectors: prometheus.NewDesc(
"container_fs_sector_writes_total",
"Cumulative count of sector writes completed",
[]string{"name", "id", "device"},
nil),
fsWritesMerged: prometheus.NewDesc(
"container_fs_writes_merged_total",
"Cumulative count of writes merged",
[]string{"name", "id", "device"},
nil),
fsWriteTime: prometheus.NewDesc(
"container_fs_write_seconds_total",
"Cumulative count of seconds spent writing",
[]string{"name", "id", "device"},
nil),
fsIoInProgress: prometheus.NewDesc(
"container_fs_io_current",
"Number of I/Os currently in progress",
[]string{"name", "id", "device"},
nil),
fsIoTime: prometheus.NewDesc(
"container_fs_io_time_seconds_total",
"Cumulative count of seconds spent doing I/Os",
[]string{"name", "id", "device"},
nil),
fsWeightedIoTime: prometheus.NewDesc(
"container_fs_io_time_weighted_seconds_total",
"Cumulative weighted I/O time in seconds",
[]string{"name", "id", "device"},
nil),
networkRxBytes: prometheus.NewDesc(
"container_network_receive_bytes_total",
"Cumulative count of bytes received",
[]string{"name", "id"},
nil),
networkRxPackets: prometheus.NewDesc(
"container_network_receive_packets_total",
"Cumulative count of packets received",
[]string{"name", "id"},
nil),
networkRxDropped: prometheus.NewDesc(
"container_network_receive_packets_dropped_total",
"Cumulative count of packets dropped while receiving",
[]string{"name", "id"},
nil),
networkRxErrors: prometheus.NewDesc(
"container_network_receive_errors_total",
"Cumulative count of errors encountered while receiving",
[]string{"name", "id"},
nil),
networkTxBytes: prometheus.NewDesc(
"container_network_transmit_bytes_total",
"Cumulative count of bytes transmitted",
[]string{"name", "id"},
nil),
networkTxPackets: prometheus.NewDesc(
"container_network_transmit_packets_total",
"Cumulative count of packets transmitted",
[]string{"name", "id"},
nil),
networkTxDropped: prometheus.NewDesc(
"container_network_transmit_packets_dropped_total",
"Cumulative count of packets dropped while transmitting",
[]string{"name", "id"},
nil),
networkTxErrors: prometheus.NewDesc(
"container_network_transmit_errors_total",
"Cumulative count of errors encountered while transmitting",
[]string{"name", "id"},
nil),
tasks: prometheus.NewDesc(
"container_tasks_state",
"Number of tasks in given state",
[]string{"state", "name", "id"},
nil),
}
c.descs = []*prometheus.Desc{
c.lastSeen,
c.cpuUsageUserSeconds,
c.cpuUsageSystemSeconds,
c.memoryUsageBytes,
c.memoryWorkingSet,
c.memoryFailures,
c.fsLimit,
c.fsUsage,
c.fsReads,
c.fsReadsSectors,
c.fsReadsMerged,
c.fsReadTime,
c.fsWrites,
c.fsWritesSectors,
c.fsWritesMerged,
c.fsWriteTime,
c.fsIoInProgress,
c.fsIoTime,
c.fsWeightedIoTime,
c.networkRxBytes,
c.networkRxPackets,
c.networkRxErrors,
c.networkRxDropped,
c.networkTxBytes,
c.networkTxPackets,
c.networkTxErrors,
c.networkTxDropped,
c.tasks,
}
return c
}
// Describe describes all the metrics ever exported by cadvisor. It
// implements prometheus.Collector.
func (c *PrometheusCollector) Describe(ch chan<- *prometheus.Desc) {
c.errors.Describe(ch)
for _, d := range c.descs {
ch <- d
}
}
// Collect fetches the stats from all containers and delivers them as
// Prometheus metrics. It implements prometheus.Collector.
func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
containers, err := c.manager.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
if err != nil {
c.errors.Set(1)
glog.Warningf("Couldn't get containers: %s", err)
return
}
for _, container := range containers {
id := container.Name
name := id
if len(container.Aliases) > 0 {
name = container.Aliases[0]
}
stats := container.Stats[0]
for desc, metrics := range map[*prometheus.Desc][]prometheusMetric{
c.cpuUsageUserSeconds: {{valueType: prometheus.CounterValue, value: float64(stats.Cpu.Usage.User) / float64(time.Second)}},
c.cpuUsageSystemSeconds: {{valueType: prometheus.CounterValue, value: float64(stats.Cpu.Usage.System) / float64(time.Second)}},
c.memoryFailures: {
{valueType: prometheus.CounterValue, labels: []string{"pgfault", "container"}, value: float64(stats.Memory.ContainerData.Pgfault)},
{valueType: prometheus.CounterValue, labels: []string{"pgmajfault", "container"}, value: float64(stats.Memory.ContainerData.Pgmajfault)},
{valueType: prometheus.CounterValue, labels: []string{"pgfault", "hierarchy"}, value: float64(stats.Memory.HierarchicalData.Pgfault)},
{valueType: prometheus.CounterValue, labels: []string{"pgmajfault", "hierarchy"}, value: float64(stats.Memory.HierarchicalData.Pgmajfault)},
},
c.tasks: {
{valueType: prometheus.GaugeValue, labels: []string{"sleeping"}, value: float64(stats.TaskStats.NrSleeping)},
{valueType: prometheus.GaugeValue, labels: []string{"running"}, value: float64(stats.TaskStats.NrRunning)},
{valueType: prometheus.GaugeValue, labels: []string{"stopped"}, value: float64(stats.TaskStats.NrStopped)},
{valueType: prometheus.GaugeValue, labels: []string{"uninterruptible"}, value: float64(stats.TaskStats.NrUninterruptible)},
{valueType: prometheus.GaugeValue, labels: []string{"iowaiting"}, value: float64(stats.TaskStats.NrIoWait)},
},
c.lastSeen: {{valueType: prometheus.GaugeValue, value: float64(time.Now().Unix())}},
c.memoryUsageBytes: {{valueType: prometheus.GaugeValue, value: float64(stats.Memory.Usage)}},
c.memoryWorkingSet: {{valueType: prometheus.GaugeValue, value: float64(stats.Memory.WorkingSet)}},
c.networkRxBytes: {{valueType: prometheus.CounterValue, value: float64(stats.Network.RxBytes)}},
c.networkRxPackets: {{valueType: prometheus.CounterValue, value: float64(stats.Network.RxPackets)}},
c.networkRxErrors: {{valueType: prometheus.CounterValue, value: float64(stats.Network.RxErrors)}},
c.networkRxDropped: {{valueType: prometheus.CounterValue, value: float64(stats.Network.RxDropped)}},
c.networkTxBytes: {{valueType: prometheus.CounterValue, value: float64(stats.Network.TxBytes)}},
c.networkTxPackets: {{valueType: prometheus.CounterValue, value: float64(stats.Network.TxPackets)}},
c.networkTxErrors: {{valueType: prometheus.CounterValue, value: float64(stats.Network.TxErrors)}},
c.networkTxDropped: {{valueType: prometheus.CounterValue, value: float64(stats.Network.TxDropped)}},
} {
for _, m := range metrics {
ch <- prometheus.MustNewConstMetric(desc, m.valueType, m.value, append(m.labels, name, id)...)
}
}
// Metrics with dynamic labels
for i, value := range stats.Cpu.Usage.PerCpu {
ch <- prometheus.MustNewConstMetric(c.cpuUsageSecondsPerCPU, prometheus.CounterValue, float64(value)/float64(time.Second), name, id, fmt.Sprintf("cpu%02d", i))
}
for _, stat := range stats.Filesystem {
for desc, m := range map[*prometheus.Desc]prometheusMetric{
c.fsReads: {valueType: prometheus.CounterValue, value: float64(stat.ReadsCompleted)},
c.fsReadsSectors: {valueType: prometheus.CounterValue, value: float64(stat.SectorsRead)},
c.fsReadsMerged: {valueType: prometheus.CounterValue, value: float64(stat.ReadsMerged)},
c.fsReadTime: {valueType: prometheus.CounterValue, value: float64(stat.ReadTime) / float64(time.Second)},
c.fsWrites: {valueType: prometheus.CounterValue, value: float64(stat.WritesCompleted)},
c.fsWritesSectors: {valueType: prometheus.CounterValue, value: float64(stat.SectorsWritten)},
c.fsWritesMerged: {valueType: prometheus.CounterValue, value: float64(stat.WritesMerged)},
c.fsWriteTime: {valueType: prometheus.CounterValue, value: float64(stat.WriteTime) / float64(time.Second)},
c.fsIoTime: {valueType: prometheus.CounterValue, value: float64(stat.IoTime) / float64(time.Second)},
c.fsWeightedIoTime: {valueType: prometheus.CounterValue, value: float64(stat.WeightedIoTime) / float64(time.Second)},
c.fsIoInProgress: {valueType: prometheus.GaugeValue, value: float64(stat.IoInProgress)},
c.fsLimit: {valueType: prometheus.GaugeValue, value: float64(stat.Limit)},
c.fsUsage: {valueType: prometheus.GaugeValue, value: float64(stat.Usage)},
} {
ch <- prometheus.MustNewConstMetric(desc, m.valueType, m.value, name, id, stat.Device)
}
}
}
c.errors.Collect(ch)
}
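
The Describe/Collect pair above satisfies the standard prometheus.Collector contract, so exposing these container metrics only requires registering the collector and mounting a handler. A minimal wiring sketch, assuming a constructor such as NewPrometheusCollector(manager) and using the promhttp package from newer client_golang releases (neither the constructor name nor promhttp appears in this diff):

package metricsdemo

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// serveCollector registers any prometheus.Collector (for example the
// PrometheusCollector defined above) and serves its metrics on /metrics.
func serveCollector(c prometheus.Collector, addr string) error {
	registry := prometheus.NewRegistry()
	if err := registry.Register(c); err != nil { // registration invokes Describe
		return err
	}
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	return http.ListenAndServe(addr, mux) // every scrape invokes Collect
}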

View File

@ -35,8 +35,8 @@ const containersHtmlTemplate = `
</head> </head>
<body> <body>
<div class="container theme-showcase" > <div class="container theme-showcase" >
<div class="col-sm-12" id="logo"> <a href="/" class="col-sm-12" id="logo">
</div> </a>
<div class="col-sm-12"> <div class="col-sm-12">
<div class="page-header"> <div class="page-header">
<h1>{{.DisplayName}}</h1> <h1>{{.DisplayName}}</h1>

View File

@ -66,19 +66,18 @@ func getContainerName(line string, currentOomInstance *OomInstance) error {
// gets the pid, name, and date from a line and adds it to oomInstance // gets the pid, name, and date from a line and adds it to oomInstance
func getProcessNamePid(line string, currentOomInstance *OomInstance) (bool, error) { func getProcessNamePid(line string, currentOomInstance *OomInstance) (bool, error) {
reList := lastLineRegexp.FindStringSubmatch(line) reList := lastLineRegexp.FindStringSubmatch(line)
if reList == nil { if reList == nil {
return false, nil return false, nil
} }
const longForm = "Jan _2 15:04:05 2006" const longForm = "Jan _2 15:04:05 2006"
stringYear := strconv.Itoa(time.Now().Year()) stringYear := strconv.Itoa(time.Now().Year())
linetime, err := time.Parse(longForm, reList[1]+" "+stringYear) linetime, err := time.ParseInLocation(longForm, reList[1]+" "+stringYear, time.Local)
if err != nil { if err != nil {
return false, err return false, err
} }
currentOomInstance.TimeOfDeath = linetime currentOomInstance.TimeOfDeath = linetime
if err != nil {
return false, err
}
pid, err := strconv.Atoi(reList[2]) pid, err := strconv.Atoi(reList[2])
if err != nil { if err != nil {
return false, err return false, err
@ -97,28 +96,53 @@ func checkIfStartOfOomMessages(line string) bool {
return false return false
} }
// opens a reader to grab new messages from the Reader object called outPipe // reads the file and sends only complete lines over a channel to analyzeLines.
// opened in PopulateOomInformation. It reads line by line splitting on // Should prevent EOF errors that occur when lines are read before being fully
// the "\n" character. Checks if line might be start or end of an oom message // written to the log. It reads line by line splitting on
// log. Then the // the "\n" character.
// lines are checked against a regexp to check for the pid, process name, etc. func readLinesFromFile(lineChannel chan string, ioreader *bufio.Reader) {
// At the end of an oom message group, AnalyzeLines adds the new oomInstance to linefragment := ""
// oomLog
func (self *OomParser) analyzeLines(ioreader *bufio.Reader, outStream chan *OomInstance) {
var line string var line string
var err error var err error
for true { for true {
for line, err = ioreader.ReadString('\n'); err != nil && err == io.EOF; { line, err = ioreader.ReadString('\n')
if err == io.EOF {
if line != "" {
linefragment += line
}
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
} else if err == nil {
if linefragment != "" {
line = linefragment + line
linefragment = ""
}
lineChannel <- line
} else if err != nil && err != io.EOF {
glog.Errorf("exiting analyzeLinesHelper with error %v", err)
} }
}
}
// Starts a goroutine running readLinesFromFile, which feeds analyzeLines complete lines.
// Lines are checked against a regexp to check for the pid, process name, etc.
// At the end of an oom message group, AnalyzeLines adds the new oomInstance to
// oomLog
func (self *OomParser) analyzeLines(ioreader *bufio.Reader, outStream chan *OomInstance) {
lineChannel := make(chan string, 10)
go func() {
readLinesFromFile(lineChannel, ioreader)
}()
for line := range lineChannel {
in_oom_kernel_log := checkIfStartOfOomMessages(line) in_oom_kernel_log := checkIfStartOfOomMessages(line)
if in_oom_kernel_log { if in_oom_kernel_log {
oomCurrentInstance := &OomInstance{ oomCurrentInstance := &OomInstance{
ContainerName: "/", ContainerName: "/",
} }
finished := false finished := false
for err == nil && !finished { for !finished {
err = getContainerName(line, oomCurrentInstance) err := getContainerName(line, oomCurrentInstance)
if err != nil { if err != nil {
glog.Errorf("%v", err) glog.Errorf("%v", err)
} }
@ -126,12 +150,13 @@ func (self *OomParser) analyzeLines(ioreader *bufio.Reader, outStream chan *OomI
if err != nil { if err != nil {
glog.Errorf("%v", err) glog.Errorf("%v", err)
} }
line, err = ioreader.ReadString('\n') line = <-lineChannel
} }
in_oom_kernel_log = false in_oom_kernel_log = false
outStream <- oomCurrentInstance outStream <- oomCurrentInstance
} }
} }
glog.Infof("exiting analyzeLines")
} }
// looks for system files that contain kernel messages and if one is found, sets // looks for system files that contain kernel messages and if one is found, sets
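
The comment on readLinesFromFile above describes the core idea: buffer partial reads on io.EOF and forward only complete, newline-terminated lines. A compact, self-contained sketch of that pattern (package and function names here are illustrative, not taken from the diff):

package tail

import (
	"bufio"
	"io"
	"time"
)

// streamCompleteLines forwards only whole, newline-terminated lines.
// On io.EOF the partial line is kept and retried after a short sleep,
// which mirrors how readLinesFromFile avoids truncated log lines.
func streamCompleteLines(r *bufio.Reader, out chan<- string) {
	fragment := ""
	for {
		line, err := r.ReadString('\n')
		switch {
		case err == nil:
			out <- fragment + line // a full line is available
			fragment = ""
		case err == io.EOF:
			fragment += line                   // keep the partial line for later
			time.Sleep(100 * time.Millisecond) // wait for the writer to catch up
		default:
			close(out) // unrecoverable read error; stop the stream
			return
		}
	}
}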

View File

@ -29,7 +29,7 @@ const systemLogFile = "systemOomExampleLog.txt"
func createExpectedContainerOomInstance(t *testing.T) *OomInstance { func createExpectedContainerOomInstance(t *testing.T) *OomInstance {
const longForm = "Jan _2 15:04:05 2006" const longForm = "Jan _2 15:04:05 2006"
deathTime, err := time.Parse(longForm, "Jan 5 15:19:27 2015") deathTime, err := time.ParseInLocation(longForm, "Jan 5 15:19:27 2015", time.Local)
if err != nil { if err != nil {
t.Fatalf("could not parse expected time when creating expected container oom instance. Had error %v", err) t.Fatalf("could not parse expected time when creating expected container oom instance. Had error %v", err)
return nil return nil
@ -44,7 +44,7 @@ func createExpectedContainerOomInstance(t *testing.T) *OomInstance {
func createExpectedSystemOomInstance(t *testing.T) *OomInstance { func createExpectedSystemOomInstance(t *testing.T) *OomInstance {
const longForm = "Jan _2 15:04:05 2006" const longForm = "Jan _2 15:04:05 2006"
deathTime, err := time.Parse(longForm, "Jan 28 19:58:45 2015") deathTime, err := time.ParseInLocation(longForm, "Jan 28 19:58:45 2015", time.Local)
if err != nil { if err != nil {
t.Fatalf("could not parse expected time when creating expected system oom instance. Had error %v", err) t.Fatalf("could not parse expected time when creating expected system oom instance. Had error %v", err)
return nil return nil
@ -86,7 +86,7 @@ func TestGetProcessNamePid(t *testing.T) {
} }
const longForm = "Jan _2 15:04:05 2006" const longForm = "Jan _2 15:04:05 2006"
correctTime, err := time.Parse(longForm, "Jan 21 22:01:49 2015") correctTime, err := time.ParseInLocation(longForm, "Jan 21 22:01:49 2015", time.Local)
couldParseLine, err = getProcessNamePid(endLine, currentOomInstance) couldParseLine, err = getProcessNamePid(endLine, currentOomInstance)
if err != nil { if err != nil {
t.Errorf("good line fed to getProcessNamePid should yield no error, but had error %v", err) t.Errorf("good line fed to getProcessNamePid should yield no error, but had error %v", err)
@ -146,6 +146,7 @@ func helpTestAnalyzeLines(oomCheckInstance *OomInstance, sysFile string, t *test
if *oomCheckInstance != *oomInstance { if *oomCheckInstance != *oomInstance {
t.Errorf("wrong instance returned. Expected %v and got %v", t.Errorf("wrong instance returned. Expected %v and got %v",
oomCheckInstance, oomInstance) oomCheckInstance, oomInstance)
t.Errorf("Container of one was %v and the other %v", oomCheckInstance.ContainerName, oomInstance.ContainerName)
} }
case <-timeout: case <-timeout:
t.Error( t.Error(
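
The switch from time.Parse to time.ParseInLocation in these tests (and in oomparser.go above) matters because a layout without zone information is interpreted as UTC by time.Parse, while kernel log timestamps are written in the machine's local time. A hedged illustration of the difference (the timestamp value is arbitrary):

package tzexample

import "time"

// parseBoth shows how the same string yields different instants.
func parseBoth() (time.Time, time.Time) {
	const longForm = "Jan _2 15:04:05 2006"
	utc, _ := time.Parse(longForm, "Jan 5 15:19:27 2015")                         // no zone in layout => UTC
	local, _ := time.ParseInLocation(longForm, "Jan 5 15:19:27 2015", time.Local) // => machine's local zone
	return utc, local // equal only when the local zone is UTC
}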

View File

@ -126,9 +126,6 @@ function initialize-pool {
kube-push kube-push
mkdir -p "$POOL_PATH/kubernetes/manifests" mkdir -p "$POOL_PATH/kubernetes/manifests"
if [[ "$ENABLE_NODE_MONITORING" == "true" ]]; then
cp "$KUBE_ROOT/cluster/saltbase/salt/cadvisor/cadvisor.manifest" "$POOL_PATH/kubernetes/manifests"
fi
if [[ "$ENABLE_NODE_LOGGING" == "true" ]]; then if [[ "$ENABLE_NODE_LOGGING" == "true" ]]; then
if [[ "$LOGGING_DESTINATION" == "elasticsearch" ]]; then if [[ "$LOGGING_DESTINATION" == "elasticsearch" ]]; then
cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-es/fluentd-es.manifest" "$POOL_PATH/kubernetes/manifests" cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-es/fluentd-es.manifest" "$POOL_PATH/kubernetes/manifests"

View File

@ -22,41 +22,6 @@ write_files:
echo "Unpacking release" echo "Unpacking release"
rm -rf /opt/kubernetes || false rm -rf /opt/kubernetes || false
tar xzf /opt/kubernetes.tar.gz -C /opt/ tar xzf /opt/kubernetes.tar.gz -C /opt/
- path: /opt/kubernetes-manifests/cadvisor.manifest
permissions: 0755
content: |
version: v1beta2
id: cadvisor-agent
containers:
- name: cadvisor
image: google/cadvisor:latest
ports:
- name: http
containerPort: 8080
hostPort: 4194
volumeMounts:
- name: varrun
mountPath: /var/run
readOnly: false
- name: varlibdocker
mountPath: /var/lib/docker
readOnly: true
- name: cgroups
mountPath: /sys/fs/cgroup
readOnly: true
volumes:
- name: varrun
source:
hostDir:
path: /var/run
- name: varlibdocker
source:
hostDir:
path: /var/lib/docker
- name: cgroups
source:
hostDir:
path: /sys/fs/cgroup
coreos: coreos:
etcd: etcd:

View File

@ -1,32 +0,0 @@
version: v1beta2
id: cadvisor-agent
containers:
- name: cadvisor
image: google/cadvisor:0.8.0
ports:
- name: http
containerPort: 8080
hostPort: 4194
volumeMounts:
- name: varrun
mountPath: /var/run
readOnly: false
- name: varlibdocker
mountPath: /var/lib/docker
readOnly: true
- name: sysfs
mountPath: /sys
readOnly: true
volumes:
- name: varrun
source:
hostDir:
path: /var/run
- name: varlibdocker
source:
hostDir:
path: /var/lib/docker
- name: sysfs
source:
hostDir:
path: /sys

View File

@ -1,8 +1,3 @@
/etc/kubernetes/manifests/cadvisor.manifest: delete_cadvisor_manifest:
file.managed: file.absent:
- source: salt://cadvisor/cadvisor.manifest - name: /etc/kubernetes/manifests/cadvisor.manifest
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755

View File

@ -72,6 +72,3 @@ kubelet:
- file: /etc/init.d/kubelet - file: /etc/init.d/kubelet
{% endif %} {% endif %}
- file: /var/lib/kubelet/kubernetes_auth - file: /var/lib/kubelet/kubernetes_auth
{% if pillar.get('enable_node_monitoring', '').lower() == 'true' %}
- file: /etc/kubernetes/manifests/cadvisor.manifest
{% endif %}

View File

@ -11,11 +11,9 @@ base:
{% else %} {% else %}
- sdn - sdn
{% endif %} {% endif %}
- cadvisor
- kubelet - kubelet
- kube-proxy - kube-proxy
{% if pillar.get('enable_node_monitoring', '').lower() == 'true' %}
- cadvisor
{% endif %}
{% if pillar.get('enable_node_logging', '').lower() == 'true' %} {% if pillar.get('enable_node_logging', '').lower() == 'true' %}
{% if pillar['logging_destination'] is defined and pillar['logging_destination'] == 'elasticsearch' %} {% if pillar['logging_destination'] is defined and pillar['logging_destination'] == 'elasticsearch' %}
- fluentd-es - fluentd-es

View File

@ -41,6 +41,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
nodeControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/controller" nodeControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/controller"
replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller" replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
@ -211,17 +212,18 @@ func startComponents(manifestURL string) (apiServerURL string) {
nodeController := nodeControllerPkg.NewNodeController(nil, "", machineList, nodeResources, cl, fakeKubeletClient{}, 10, 5*time.Minute) nodeController := nodeControllerPkg.NewNodeController(nil, "", machineList, nodeResources, cl, fakeKubeletClient{}, 10, 5*time.Minute)
nodeController.Run(5*time.Second, true, false) nodeController.Run(5*time.Second, true, false)
cadvisorInterface := new(cadvisor.Fake)
// Kubelet (localhost) // Kubelet (localhost)
testRootDir := makeTempDirOrDie("kubelet_integ_1.") testRootDir := makeTempDirOrDie("kubelet_integ_1.")
glog.Infof("Using %s as root dir for kubelet #1", testRootDir) glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
kubeletapp.SimpleRunKubelet(cl, &fakeDocker1, machineList[0], testRootDir, manifestURL, "127.0.0.1", 10250, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil) kubeletapp.SimpleRunKubelet(cl, &fakeDocker1, machineList[0], testRootDir, manifestURL, "127.0.0.1", 10250, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface)
// Kubelet (machine) // Kubelet (machine)
// Create a second kubelet so that the guestbook example's two redis slaves both // Create a second kubelet so that the guestbook example's two redis slaves both
// have a place they can schedule. // have a place they can schedule.
testRootDir = makeTempDirOrDie("kubelet_integ_2.") testRootDir = makeTempDirOrDie("kubelet_integ_2.")
glog.Infof("Using %s as root dir for kubelet #2", testRootDir) glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
kubeletapp.SimpleRunKubelet(cl, &fakeDocker2, machineList[1], testRootDir, "", "127.0.0.1", 10251, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil) kubeletapp.SimpleRunKubelet(cl, &fakeDocker2, machineList[1], testRootDir, "", "127.0.0.1", 10251, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface)
return apiServer.URL return apiServer.URL
} }

View File

@ -21,7 +21,6 @@ import (
"fmt" "fmt"
"math/rand" "math/rand"
"net" "net"
"strconv"
"time" "time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@ -39,7 +38,6 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog" "github.com/golang/glog"
cadvisorClient "github.com/google/cadvisor/client"
"github.com/spf13/pflag" "github.com/spf13/pflag"
) )
@ -69,7 +67,7 @@ type KubeletServer struct {
MinimumGCAge time.Duration MinimumGCAge time.Duration
MaxContainerCount int MaxContainerCount int
AuthPath string AuthPath string
CAdvisorPort uint CadvisorPort uint
OOMScoreAdj int OOMScoreAdj int
APIServerList util.StringList APIServerList util.StringList
ClusterDomain string ClusterDomain string
@ -95,7 +93,7 @@ func NewKubeletServer() *KubeletServer {
EnableDebuggingHandlers: true, EnableDebuggingHandlers: true,
MinimumGCAge: 1 * time.Minute, MinimumGCAge: 1 * time.Minute,
MaxContainerCount: 5, MaxContainerCount: 5,
CAdvisorPort: 4194, CadvisorPort: 4194,
OOMScoreAdj: -900, OOMScoreAdj: -900,
MasterServiceNamespace: api.NamespaceDefault, MasterServiceNamespace: api.NamespaceDefault,
} }
@ -124,7 +122,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&s.MinimumGCAge, "minimum_container_ttl_duration", s.MinimumGCAge, "Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'") fs.DurationVar(&s.MinimumGCAge, "minimum_container_ttl_duration", s.MinimumGCAge, "Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'")
fs.IntVar(&s.MaxContainerCount, "maximum_dead_containers_per_container", s.MaxContainerCount, "Maximum number of old instances of a container to retain per container. Each container takes up some disk space. Default: 5.") fs.IntVar(&s.MaxContainerCount, "maximum_dead_containers_per_container", s.MaxContainerCount, "Maximum number of old instances of a container to retain per container. Each container takes up some disk space. Default: 5.")
fs.StringVar(&s.AuthPath, "auth_path", s.AuthPath, "Path to .kubernetes_auth file, specifying how to authenticate to API server.") fs.StringVar(&s.AuthPath, "auth_path", s.AuthPath, "Path to .kubernetes_auth file, specifying how to authenticate to API server.")
fs.UintVar(&s.CAdvisorPort, "cadvisor_port", s.CAdvisorPort, "The port of the localhost cAdvisor endpoint") fs.UintVar(&s.CadvisorPort, "cadvisor_port", s.CadvisorPort, "The port of the localhost cAdvisor endpoint")
fs.IntVar(&s.OOMScoreAdj, "oom_score_adj", s.OOMScoreAdj, "The oom_score_adj value for kubelet process. Values must be within the range [-1000, 1000]") fs.IntVar(&s.OOMScoreAdj, "oom_score_adj", s.OOMScoreAdj, "The oom_score_adj value for kubelet process. Values must be within the range [-1000, 1000]")
fs.Var(&s.APIServerList, "api_servers", "List of Kubernetes API servers for publishing events, and reading pods and services. (ip:port), comma separated.") fs.Var(&s.APIServerList, "api_servers", "List of Kubernetes API servers for publishing events, and reading pods and services. (ip:port), comma separated.")
fs.StringVar(&s.ClusterDomain, "cluster_domain", s.ClusterDomain, "Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains") fs.StringVar(&s.ClusterDomain, "cluster_domain", s.ClusterDomain, "Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains")
@ -152,6 +150,11 @@ func (s *KubeletServer) Run(_ []string) error {
credentialprovider.SetPreferredDockercfgPath(s.RootDirectory) credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)
cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
if err != nil {
return err
}
kcfg := KubeletConfig{ kcfg := KubeletConfig{
Address: s.Address, Address: s.Address,
AllowPrivileged: s.AllowPrivileged, AllowPrivileged: s.AllowPrivileged,
@ -172,7 +175,7 @@ func (s *KubeletServer) Run(_ []string) error {
ClusterDNS: s.ClusterDNS, ClusterDNS: s.ClusterDNS,
Runonce: s.RunOnce, Runonce: s.RunOnce,
Port: s.Port, Port: s.Port,
CAdvisorPort: s.CAdvisorPort, CadvisorInterface: cadvisorInterface,
EnableServer: s.EnableServer, EnableServer: s.EnableServer,
EnableDebuggingHandlers: s.EnableDebuggingHandlers, EnableDebuggingHandlers: s.EnableDebuggingHandlers,
DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint), DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
@ -240,7 +243,8 @@ func SimpleRunKubelet(client *client.Client,
port uint, port uint,
masterServiceNamespace string, masterServiceNamespace string,
volumePlugins []volume.Plugin, volumePlugins []volume.Plugin,
tlsOptions *kubelet.TLSOptions) { tlsOptions *kubelet.TLSOptions,
cadvisorInterface cadvisor.Interface) {
kcfg := KubeletConfig{ kcfg := KubeletConfig{
KubeClient: client, KubeClient: client,
DockerClient: dockerClient, DockerClient: dockerClient,
@ -259,6 +263,7 @@ func SimpleRunKubelet(client *client.Client,
MasterServiceNamespace: masterServiceNamespace, MasterServiceNamespace: masterServiceNamespace,
VolumePlugins: volumePlugins, VolumePlugins: volumePlugins,
TLSOptions: tlsOptions, TLSOptions: tlsOptions,
CadvisorInterface: cadvisorInterface,
} }
RunKubelet(&kcfg) RunKubelet(&kcfg)
} }
@ -336,7 +341,7 @@ func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig {
type KubeletConfig struct { type KubeletConfig struct {
KubeClient *client.Client KubeClient *client.Client
DockerClient dockertools.DockerInterface DockerClient dockertools.DockerInterface
CAdvisorPort uint CadvisorInterface cadvisor.Interface
Address util.IP Address util.IP
AllowPrivileged bool AllowPrivileged bool
HostnameOverride string HostnameOverride string
@ -379,15 +384,6 @@ func createAndInitKubelet(kc *KubeletConfig, pc *config.PodConfig) (*kubelet.Kub
kubeClient = kc.KubeClient kubeClient = kc.KubeClient
} }
cc, err := cadvisorClient.NewClient("http://127.0.0.1:" + strconv.Itoa(int(kc.CAdvisorPort)))
if err != nil {
return nil, err
}
cadvisorInterface, err := cadvisor.New(cc)
if err != nil {
return nil, err
}
k, err := kubelet.NewMainKubelet( k, err := kubelet.NewMainKubelet(
kc.Hostname, kc.Hostname,
kc.DockerClient, kc.DockerClient,
@ -406,7 +402,7 @@ func createAndInitKubelet(kc *KubeletConfig, pc *config.PodConfig) (*kubelet.Kub
kc.VolumePlugins, kc.VolumePlugins,
kc.StreamingConnectionIdleTimeout, kc.StreamingConnectionIdleTimeout,
kc.Recorder, kc.Recorder,
cadvisorInterface, kc.CadvisorInterface,
kc.StatusUpdateFrequency) kc.StatusUpdateFrequency)
if err != nil { if err != nil {

View File

@ -36,6 +36,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
nodeControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/controller" nodeControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/controller" "github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master" "github.com/GoogleCloudPlatform/kubernetes/pkg/master"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports" "github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports"
@ -146,7 +147,11 @@ func startComponents(etcdClient tools.EtcdClient, cl *client.Client, addr net.IP
runControllerManager(machineList, cl, *nodeMilliCPU, *nodeMemory) runControllerManager(machineList, cl, *nodeMilliCPU, *nodeMemory)
dockerClient := dockertools.ConnectToDockerOrDie(*dockerEndpoint) dockerClient := dockertools.ConnectToDockerOrDie(*dockerEndpoint)
kubeletapp.SimpleRunKubelet(cl, dockerClient, machineList[0], "/tmp/kubernetes", "", "127.0.0.1", 10250, *masterServiceNamespace, kubeletapp.ProbeVolumePlugins(), nil) cadvisorInterface, err := cadvisor.New(0)
if err != nil {
glog.Fatalf("Failed to create cAdvisor: %v", err)
}
kubeletapp.SimpleRunKubelet(cl, dockerClient, machineList[0], "/tmp/kubernetes", "", "127.0.0.1", 10250, *masterServiceNamespace, kubeletapp.ProbeVolumePlugins(), nil, cadvisorInterface)
} }
func newApiClient(addr net.IP, port int) *client.Client { func newApiClient(addr net.IP, port int) *client.Client {

View File

@ -17,15 +17,23 @@ limitations under the License.
package cadvisor package cadvisor
import ( import (
"github.com/google/cadvisor/client" cadvisorApi "github.com/google/cadvisor/info/v1"
) )
type cadvisorClient struct { // Fake cAdvisor implementation.
*client.Client type Fake struct {
} }
func New(cc *client.Client) (Interface, error) { var _ Interface = new(Fake)
return &cadvisorClient{
Client: cc, func (c *Fake) ContainerInfo(name string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) {
}, nil return new(cadvisorApi.ContainerInfo), nil
}
func (c *Fake) DockerContainer(name string, req *cadvisorApi.ContainerInfoRequest) (cadvisorApi.ContainerInfo, error) {
return cadvisorApi.ContainerInfo{}, nil
}
func (c *Fake) MachineInfo() (*cadvisorApi.MachineInfo, error) {
return new(cadvisorApi.MachineInfo), nil
} }

View File

@ -0,0 +1,112 @@
// +build cgo,linux
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cadvisor
import (
"fmt"
"net/http"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
cadvisorHttp "github.com/google/cadvisor/http"
cadvisorApi "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager"
"github.com/google/cadvisor/storage/memory"
"github.com/google/cadvisor/utils/sysfs"
)
type cadvisorClient struct {
manager.Manager
}
var _ Interface = new(cadvisorClient)
// TODO(vmarmol): Make configurable.
// The number of stats to keep in memory.
const statsToCache = 60
// Creates a cAdvisor and exports its API on the specified port if port > 0.
func New(port uint) (Interface, error) {
sysFs, err := sysfs.NewRealSysFs()
if err != nil {
return nil, err
}
// Create and start the cAdvisor container manager.
m, err := manager.New(memory.New(statsToCache, nil), sysFs)
if err != nil {
return nil, err
}
err = m.Start()
if err != nil {
return nil, err
}
cadvisorClient := &cadvisorClient{
Manager: m,
}
// Export the HTTP endpoint if a port was specified.
if port > 0 {
err = cadvisorClient.exportHTTP(port)
if err != nil {
return nil, err
}
}
return cadvisorClient, nil
}
func (self *cadvisorClient) exportHTTP(port uint) error {
mux := http.NewServeMux()
err := cadvisorHttp.RegisterHandlers(mux, self, "", "", "", "", "/metrics")
if err != nil {
return err
}
serv := &http.Server{
Addr: fmt.Sprintf(":%d", port),
Handler: mux,
}
// TODO(vmarmol): Remove this when the cAdvisor port is once again free.
// If export failed, retry in the background until we are able to bind.
// This allows an existing cAdvisor to be killed before this one registers.
go func() {
defer util.HandleCrash()
err := serv.ListenAndServe()
for err != nil {
glog.Infof("Failed to register cAdvisor on port %d, retrying. Error: %v", port, err)
time.Sleep(time.Minute)
err = serv.ListenAndServe()
}
}()
return nil
}
func (self *cadvisorClient) ContainerInfo(name string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) {
return self.GetContainerInfo(name, req)
}
func (self *cadvisorClient) MachineInfo() (*cadvisorApi.MachineInfo, error) {
return self.GetMachineInfo()
}
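
Putting the pieces together, a caller (the kubelet in this PR) constructs the in-process cAdvisor once and queries it through the Interface. A minimal usage sketch under the same import paths as the diff; the chosen port, logging, and error handling are illustrative:

package kubeletexample

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor"
	"github.com/golang/glog"
	cadvisorApi "github.com/google/cadvisor/info/v1"
)

func exampleUse() {
	// Port 4194 keeps the historical cAdvisor port; 0 would skip the HTTP endpoint.
	cc, err := cadvisor.New(4194)
	if err != nil {
		glog.Fatalf("Failed to create cAdvisor: %v", err)
	}

	machineInfo, err := cc.MachineInfo()
	if err != nil {
		glog.Errorf("Failed to get machine info: %v", err)
		return
	}
	glog.Infof("node reports %d cores", machineInfo.NumCores)

	// Per-container stats flow through the same interface.
	if _, err := cc.ContainerInfo("/", &cadvisorApi.ContainerInfoRequest{NumStats: 1}); err != nil {
		glog.Errorf("Failed to get root container info: %v", err)
	}
}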

View File

@ -25,19 +25,21 @@ type Mock struct {
mock.Mock mock.Mock
} }
// ContainerInfo is a mock implementation of CadvisorInterface.ContainerInfo. var _ Interface = new(Mock)
// ContainerInfo is a mock implementation of Interface.ContainerInfo.
func (c *Mock) ContainerInfo(name string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) { func (c *Mock) ContainerInfo(name string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) {
args := c.Called(name, req) args := c.Called(name, req)
return args.Get(0).(*cadvisorApi.ContainerInfo), args.Error(1) return args.Get(0).(*cadvisorApi.ContainerInfo), args.Error(1)
} }
// DockerContainer is a mock implementation of CadvisorInterface.DockerContainer. // DockerContainer is a mock implementation of Interface.DockerContainer.
func (c *Mock) DockerContainer(name string, req *cadvisorApi.ContainerInfoRequest) (cadvisorApi.ContainerInfo, error) { func (c *Mock) DockerContainer(name string, req *cadvisorApi.ContainerInfoRequest) (cadvisorApi.ContainerInfo, error) {
args := c.Called(name, req) args := c.Called(name, req)
return args.Get(0).(cadvisorApi.ContainerInfo), args.Error(1) return args.Get(0).(cadvisorApi.ContainerInfo), args.Error(1)
} }
// MachineInfo is a mock implementation of CadvisorInterface.MachineInfo. // MachineInfo is a mock implementation of Interface.MachineInfo.
func (c *Mock) MachineInfo() (*cadvisorApi.MachineInfo, error) { func (c *Mock) MachineInfo() (*cadvisorApi.MachineInfo, error) {
args := c.Called() args := c.Called()
return args.Get(0).(*cadvisorApi.MachineInfo), args.Error(1) return args.Get(0).(*cadvisorApi.MachineInfo), args.Error(1)

View File

@ -0,0 +1,48 @@
// +build !cgo !linux
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cadvisor
import (
"errors"
cadvisorApi "github.com/google/cadvisor/info/v1"
)
type cadvisorUnsupported struct {
}
var _ Interface = new(cadvisorUnsupported)
func New(port uint) (Interface, error) {
return &cadvisorUnsupported{}, nil
}
var unsupportedErr = errors.New("cAdvisor is unsupported in this build")
func (self *cadvisorUnsupported) DockerContainer(name string, req *cadvisorApi.ContainerInfoRequest) (cadvisorApi.ContainerInfo, error) {
return cadvisorApi.ContainerInfo{}, unsupportedErr
}
func (self *cadvisorUnsupported) ContainerInfo(name string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) {
return nil, unsupportedErr
}
func (self *cadvisorUnsupported) MachineInfo() (*cadvisorApi.MachineInfo, error) {
return nil, unsupportedErr
}
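
The two implementations above are selected by Go build constraints: "cgo,linux" (comma means AND) compiles the real cAdvisor client only when cgo is enabled and the target is linux, while "!cgo !linux" (space means OR) compiles the stub whenever either condition fails, so exactly one New is present in any build. The same pattern in miniature, with hypothetical file names:

// supported_linux.go (hypothetical file name)

// +build cgo,linux

package feature

func Supported() bool { return true }

// supported_stub.go (hypothetical file name)

// +build !cgo !linux

package feature

func Supported() bool { return false }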