mirror of https://github.com/k3s-io/k3s
Update cAdvisor dependency.
Brings with it:
- Lower logging
- Better event storage and management
- Support for container labels

parent 7882d1eeb2
commit 2af756fd55
@@ -19,11 +19,6 @@
 		"Comment": "null-12",
 		"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
 	},
-	{
-		"ImportPath": "code.google.com/p/go.exp/inotify",
-		"Comment": "null-75",
-		"Rev": "bd8df7009305d6ada223ea3c95b94c0f38bfa119"
-	},
 	{
 		"ImportPath": "code.google.com/p/google-api-go-client/compute/v1",
 		"Comment": "release-96",
@@ -206,83 +201,88 @@
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/api",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
+	{
+		"ImportPath": "github.com/google/cadvisor/collector",
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
+	},
 	{
 		"ImportPath": "github.com/google/cadvisor/container",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/events",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/fs",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/healthz",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/http",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/info/v1",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/info/v2",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/manager",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/metrics",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/pages",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/storage",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/summary",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/utils",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/validate",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/cadvisor/version",
-		"Comment": "0.12.0-21-g3166552",
-		"Rev": "3166552cc4481e48dc1002e000d7e4ae8d5b9850"
+		"Comment": "0.13.0-22-ge3aa15a",
+		"Rev": "e3aa15a3bbee5367532ac56aa55be41441c55356"
 	},
 	{
 		"ImportPath": "github.com/google/gofuzz",
@@ -429,6 +429,10 @@
 		"ImportPath": "golang.org/x/crypto/ssh",
 		"Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3"
 	},
+	{
+		"ImportPath": "golang.org/x/exp/inotify",
+		"Rev": "d00e13ec443927751b2bd49e97dea7bf3b6a6487"
+	},
 	{
 		"ImportPath": "golang.org/x/net/context",
 		"Rev": "cbcac7bb8415db9b6cb4d1ebab1dc9afbd688b97"
@@ -199,28 +199,27 @@ func getEventRequest(r *http.Request) (*events.Request, bool, error) {
 			query.IncludeSubcontainers = newBool
 		}
 	}
-	if val, ok := urlMap["oom_events"]; ok {
-		newBool, err := strconv.ParseBool(val[0])
-		if err == nil {
-			query.EventType[info.EventOom] = newBool
-		}
-	}
-	if val, ok := urlMap["oom_kill_events"]; ok {
-		newBool, err := strconv.ParseBool(val[0])
-		if err == nil {
-			query.EventType[info.EventOomKill] = newBool
-		}
-	}
-	if val, ok := urlMap["creation_events"]; ok {
-		newBool, err := strconv.ParseBool(val[0])
-		if err == nil {
-			query.EventType[info.EventContainerCreation] = newBool
-		}
-	}
-	if val, ok := urlMap["deletion_events"]; ok {
-		newBool, err := strconv.ParseBool(val[0])
-		if err == nil {
-			query.EventType[info.EventContainerDeletion] = newBool
-		}
-	}
+	eventTypes := map[string]info.EventType{
+		"oom_events":      info.EventOom,
+		"oom_kill_events": info.EventOomKill,
+		"creation_events": info.EventContainerCreation,
+		"deletion_events": info.EventContainerDeletion,
+	}
+	allEventTypes := false
+	if val, ok := urlMap["all_events"]; ok {
+		newBool, err := strconv.ParseBool(val[0])
+		if err == nil {
+			allEventTypes = newBool
+		}
+	}
+	for opt, eventType := range eventTypes {
+		if allEventTypes {
+			query.EventType[eventType] = true
+		} else if val, ok := urlMap[opt]; ok {
+			newBool, err := strconv.ParseBool(val[0])
+			if err == nil {
+				query.EventType[eventType] = newBool
+			}
+		}
+	}
 	if val, ok := urlMap["max_events"]; ok {
83 Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager.go generated vendored Normal file
@@ -0,0 +1,83 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collector
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+type collectorManager struct {
+	collectors []*collectorData
+}
+
+var _ CollectorManager = &collectorManager{}
+
+type collectorData struct {
+	collector          Collector
+	nextCollectionTime time.Time
+}
+
+// Returns a new CollectorManager that is thread-compatible.
+func NewCollectorManager() (CollectorManager, error) {
+	return &collectorManager{
+		collectors: []*collectorData{},
+	}, nil
+}
+
+func (cm *collectorManager) RegisterCollector(collector Collector) error {
+	cm.collectors = append(cm.collectors, &collectorData{
+		collector:          collector,
+		nextCollectionTime: time.Now(),
+	})
+	return nil
+}
+
+func (cm *collectorManager) Collect() (time.Time, error) {
+	var errors []error
+
+	// Collect from all collectors that are ready.
+	var next time.Time
+	for _, c := range cm.collectors {
+		if c.nextCollectionTime.Before(time.Now()) {
+			nextCollection, err := c.collector.Collect()
+			if err != nil {
+				errors = append(errors, err)
+			}
+			c.nextCollectionTime = nextCollection
+		}
+
+		// Keep track of the next collector that will be ready.
+		if next.IsZero() || next.After(c.nextCollectionTime) {
+			next = c.nextCollectionTime
+		}
+	}
+
+	return next, compileErrors(errors)
+}
+
+// Make an error slice into a single error.
+func compileErrors(errors []error) error {
+	if len(errors) == 0 {
+		return nil
+	}
+
+	res := make([]string, len(errors))
+	for i := range errors {
+		res[i] = fmt.Sprintf("Error %d: %v", i, errors[i].Error())
+	}
+	return fmt.Errorf("%s", strings.Join(res, ","))
+}
70 Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager_test.go generated vendored Normal file
@@ -0,0 +1,70 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collector
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type fakeCollector struct {
+	nextCollectionTime time.Time
+	err                error
+	collectedFrom      int
+}
+
+func (fc *fakeCollector) Collect() (time.Time, error) {
+	fc.collectedFrom++
+	return fc.nextCollectionTime, fc.err
+}
+
+func (fc *fakeCollector) Name() string {
+	return "fake-collector"
+}
+
+func TestCollect(t *testing.T) {
+	cm := &collectorManager{}
+
+	firstTime := time.Now().Add(-time.Hour)
+	secondTime := time.Now().Add(time.Hour)
+	f1 := &fakeCollector{
+		nextCollectionTime: firstTime,
+	}
+	f2 := &fakeCollector{
+		nextCollectionTime: secondTime,
+	}
+
+	assert := assert.New(t)
+	assert.NoError(cm.RegisterCollector(f1))
+	assert.NoError(cm.RegisterCollector(f2))
+
+	// First collection, everyone gets collected from.
+	nextTime, err := cm.Collect()
+	assert.Equal(firstTime, nextTime)
+	assert.NoError(err)
+	assert.Equal(1, f1.collectedFrom)
+	assert.Equal(1, f2.collectedFrom)
+
+	f1.nextCollectionTime = time.Now().Add(2 * time.Hour)
+
+	// Second collection, only the one that is ready gets collected from.
+	nextTime, err = cm.Collect()
+	assert.Equal(secondTime, nextTime)
+	assert.NoError(err)
+	assert.Equal(2, f1.collectedFrom)
+	assert.Equal(1, f2.collectedFrom)
+}
31 Godeps/_workspace/src/github.com/google/cadvisor/collector/fakes.go generated vendored Normal file
@@ -0,0 +1,31 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collector
+
+import (
+	"time"
+)
+
+type FakeCollectorManager struct {
+}
+
+func (fkm *FakeCollectorManager) RegisterCollector(collector Collector) error {
+	return nil
+}
+
+func (fkm *FakeCollectorManager) Collect() (time.Time, error) {
+	var zero time.Time
+	return zero, nil
+}
45 Godeps/_workspace/src/github.com/google/cadvisor/collector/types.go generated vendored Normal file
@@ -0,0 +1,45 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collector
+
+import (
+	"time"
+)
+
+// TODO(vmarmol): Export to a custom metrics type when that is available.
+
+// Metric collector.
+type Collector interface {
+	// Collect metrics from this collector.
+	// Returns the next time this collector should be collected from.
+	// Next collection time is always returned, even when an error occurs.
+	// A collection time of zero means no more collection.
+	Collect() (time.Time, error)
+
+	// Name of this collector.
+	Name() string
+}
+
+// Manages and runs collectors.
+type CollectorManager interface {
+	// Register a collector.
+	RegisterCollector(collector Collector) error
+
+	// Collect from collectors that are ready and return the next time
+	// at which a collector will be ready to collect from.
+	// Next collection time is always returned, even when an error occurs.
+	// A collection time of zero means no more collection.
+	Collect() (time.Time, error)
+}
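The Collector/CollectorManager pair above is the whole surface of the new package. As a quick orientation aid (not part of the commit; tickCollector and runCollectors are hypothetical, and imports of fmt, time, and the collector package are assumed), a consumer of this API looks roughly like this:

// Sketch only: a trivial Collector driven by the CollectorManager above.
type tickCollector struct {
	interval time.Duration
}

// Collect reports once and asks to be collected again after `interval`.
func (t *tickCollector) Collect() (time.Time, error) {
	fmt.Println("collected at", time.Now())
	return time.Now().Add(t.interval), nil
}

func (t *tickCollector) Name() string { return "tick" }

func runCollectors() error {
	cm, err := collector.NewCollectorManager()
	if err != nil {
		return err
	}
	if err := cm.RegisterCollector(&tickCollector{interval: time.Second}); err != nil {
		return err
	}
	// Collect() returns the next time any collector is ready;
	// a zero time would mean nothing is scheduled anymore.
	next, err := cm.Collect()
	if err != nil {
		return err
	}
	time.Sleep(next.Sub(time.Now()))
	_, err = cm.Collect()
	return err
}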
@@ -134,17 +134,19 @@ func FullContainerName(dockerId string) string {
 }
 
 // Docker handles all containers under /docker
-func (self *dockerFactory) CanHandle(name string) (bool, error) {
+func (self *dockerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
+	// docker factory accepts all containers it can handle.
+	canAccept := true
 	// Check if the container is known to docker and it is active.
 	id := ContainerNameToDockerId(name)
 
 	// We assume that if Inspect fails then the container is not known to docker.
 	ctnr, err := self.client.InspectContainer(id)
 	if err != nil || !ctnr.State.Running {
-		return false, fmt.Errorf("error inspecting container: %v", err)
+		return false, canAccept, fmt.Errorf("error inspecting container: %v", err)
 	}
 
-	return true, nil
+	return true, canAccept, nil
 }
 
 func parseDockerVersion(full_version_string string) ([]int, error) {
@@ -68,6 +68,9 @@ type dockerContainerHandler struct {
 
 	// Time at which this container was created.
 	creationTime time.Time
+
+	// Metadata labels associated with the container.
+	labels map[string]string
 }
 
 func newDockerContainerHandler(
@@ -115,6 +118,7 @@ func newDockerContainerHandler(
 	// Add the name and bare ID as aliases of the container.
 	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"))
 	handler.aliases = append(handler.aliases, id)
+	handler.labels = ctnr.Config.Labels
 
 	return handler, nil
 }
@@ -184,6 +188,7 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 	if self.usesAufsDriver {
 		spec.HasFilesystem = true
 	}
+	spec.Labels = self.labels
 
 	return spec, err
 }
@@ -22,11 +22,11 @@ import (
 )
 
 type ContainerHandlerFactory interface {
-	// Create a new ContainerHandler using this factory. CanHandle() must have returned true.
-	NewContainerHandler(name string) (ContainerHandler, error)
+	// Create a new ContainerHandler using this factory. CanHandleAndAccept() must have returned true.
+	NewContainerHandler(name string) (c ContainerHandler, err error)
 
-	// Returns whether this factory can handle the specified container.
-	CanHandle(name string) (bool, error)
+	// Returns whether this factory can handle and accept the specified container.
+	CanHandleAndAccept(name string) (handle bool, accept bool, err error)
 
 	// Name of the factory.
 	String() string
@@ -57,25 +57,30 @@ func HasFactories() bool {
 }
 
 // Create a new ContainerHandler for the specified container.
-func NewContainerHandler(name string) (ContainerHandler, error) {
+func NewContainerHandler(name string) (ContainerHandler, bool, error) {
 	factoriesLock.RLock()
 	defer factoriesLock.RUnlock()
 
 	// Create the ContainerHandler with the first factory that supports it.
 	for _, factory := range factories {
-		canHandle, err := factory.CanHandle(name)
+		canHandle, canAccept, err := factory.CanHandleAndAccept(name)
 		if err != nil {
-			glog.V(4).Infof("Error trying to work out if we can hande %s: %v", name, err)
+			glog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err)
 		}
 		if canHandle {
+			if !canAccept {
+				glog.V(3).Infof("Factory %q can handle container %q, but ignoring.", factory, name)
+				return nil, false, nil
+			}
 			glog.V(3).Infof("Using factory %q for container %q", factory, name)
-			return factory.NewContainerHandler(name)
+			handle, err := factory.NewContainerHandler(name)
+			return handle, canAccept, err
 		} else {
 			glog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
 		}
 	}
 
-	return nil, fmt.Errorf("no known factory can handle creation of container")
+	return nil, false, fmt.Errorf("no known factory can handle creation of container")
 }
 
 // Clear the known factories.
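The split between handle and accept is worth spelling out: a factory may recognize a container name yet decline to track it, and NewContainerHandler then reports accept=false instead of an error. A hypothetical factory under the new contract (illustrative only, not from this commit; assumes an fmt import):

// Sketch: handle every container, but only accept a fixed allow-list.
type allowListFactory struct {
	allowed map[string]bool
}

func (f *allowListFactory) String() string { return "allowlist" }

// Handle everything; accept only allow-listed names. NewContainerHandler
// is invoked only when both booleans come back true.
func (f *allowListFactory) CanHandleAndAccept(name string) (bool, bool, error) {
	return true, f.allowed[name], nil
}

func (f *allowListFactory) NewContainerHandler(name string) (ContainerHandler, error) {
	return nil, fmt.Errorf("handler construction elided in this sketch")
}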
@@ -24,14 +24,15 @@ type mockContainerHandlerFactory struct {
 	mock.Mock
 	Name           string
 	CanHandleValue bool
+	CanAcceptValue bool
 }
 
 func (self *mockContainerHandlerFactory) String() string {
 	return self.Name
 }
 
-func (self *mockContainerHandlerFactory) CanHandle(name string) (bool, error) {
-	return self.CanHandleValue, nil
+func (self *mockContainerHandlerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
+	return self.CanHandleValue, self.CanAcceptValue, nil
 }
 
 func (self *mockContainerHandlerFactory) NewContainerHandler(name string) (ContainerHandler, error) {
@@ -50,6 +51,7 @@ func TestNewContainerHandler_FirstMatches(t *testing.T) {
 	allwaysYes := &mockContainerHandlerFactory{
 		Name:           "yes",
 		CanHandleValue: true,
+		CanAcceptValue: true,
 	}
 	RegisterContainerHandlerFactory(allwaysYes)
 
@@ -60,7 +62,7 @@ func TestNewContainerHandler_FirstMatches(t *testing.T) {
 	}
 	allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil)
 
-	cont, err := NewContainerHandler(testContainerName)
+	cont, _, err := NewContainerHandler(testContainerName)
 	if err != nil {
 		t.Error(err)
 	}
@@ -76,11 +78,13 @@ func TestNewContainerHandler_SecondMatches(t *testing.T) {
 	allwaysNo := &mockContainerHandlerFactory{
 		Name:           "no",
 		CanHandleValue: false,
+		CanAcceptValue: true,
 	}
 	RegisterContainerHandlerFactory(allwaysNo)
 	allwaysYes := &mockContainerHandlerFactory{
 		Name:           "yes",
 		CanHandleValue: true,
+		CanAcceptValue: true,
 	}
 	RegisterContainerHandlerFactory(allwaysYes)
 
@@ -91,7 +95,7 @@ func TestNewContainerHandler_SecondMatches(t *testing.T) {
 	}
 	allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil)
 
-	cont, err := NewContainerHandler(testContainerName)
+	cont, _, err := NewContainerHandler(testContainerName)
 	if err != nil {
 		t.Error(err)
 	}
@@ -107,16 +111,44 @@ func TestNewContainerHandler_NoneMatch(t *testing.T) {
 	allwaysNo1 := &mockContainerHandlerFactory{
 		Name:           "no",
 		CanHandleValue: false,
+		CanAcceptValue: true,
 	}
 	RegisterContainerHandlerFactory(allwaysNo1)
 	allwaysNo2 := &mockContainerHandlerFactory{
 		Name:           "no",
 		CanHandleValue: false,
+		CanAcceptValue: true,
 	}
 	RegisterContainerHandlerFactory(allwaysNo2)
 
-	_, err := NewContainerHandler(testContainerName)
+	_, _, err := NewContainerHandler(testContainerName)
 	if err == nil {
 		t.Error("Expected NewContainerHandler to fail")
 	}
 }
+
+func TestNewContainerHandler_Accept(t *testing.T) {
+	ClearContainerHandlerFactories()
+
+	// Register handler that can handle the container, but can't accept it.
+	cannotHandle := &mockContainerHandlerFactory{
+		Name:           "no",
+		CanHandleValue: false,
+		CanAcceptValue: true,
+	}
+	RegisterContainerHandlerFactory(cannotHandle)
+	cannotAccept := &mockContainerHandlerFactory{
+		Name:           "no",
+		CanHandleValue: true,
+		CanAcceptValue: false,
+	}
+	RegisterContainerHandlerFactory(cannotAccept)
+
+	_, accept, err := NewContainerHandler(testContainerName)
+	if err != nil {
+		t.Error("Expected NewContainerHandler to succeed")
+	}
+	if accept == true {
+		t.Error("Expected NewContainerHandler to ignore the container.")
+	}
+}
145 Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go generated vendored
@@ -100,14 +100,34 @@ func DockerStateDir(dockerRoot string) string {
 	return path.Join(dockerRoot, "containers")
 }
 
+func DiskStatsCopy0(major, minor uint64) *info.PerDiskStats {
+	disk := info.PerDiskStats{
+		Major: major,
+		Minor: minor,
+	}
+	disk.Stats = make(map[string]uint64)
+	return &disk
+}
+
+type DiskKey struct {
+	Major uint64
+	Minor uint64
+}
+
+func DiskStatsCopy1(disk_stat map[DiskKey]*info.PerDiskStats) []info.PerDiskStats {
+	i := 0
+	stat := make([]info.PerDiskStats, len(disk_stat))
+	for _, disk := range disk_stat {
+		stat[i] = *disk
+		i++
+	}
+	return stat
+}
+
 func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskStats) {
 	if len(blkio_stats) == 0 {
 		return
 	}
-	type DiskKey struct {
-		Major uint64
-		Minor uint64
-	}
 	disk_stat := make(map[DiskKey]*info.PerDiskStats)
 	for i := range blkio_stats {
 		major := blkio_stats[i].Major
@@ -118,12 +138,7 @@ func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskSta
 		}
 		diskp, ok := disk_stat[disk_key]
 		if !ok {
-			disk := info.PerDiskStats{
-				Major: major,
-				Minor: minor,
-			}
-			disk.Stats = make(map[string]uint64)
-			diskp = &disk
+			diskp = DiskStatsCopy0(major, minor)
 			disk_stat[disk_key] = diskp
 		}
 		op := blkio_stats[i].Op
@@ -132,68 +147,76 @@ func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskSta
 		}
 		diskp.Stats[op] = blkio_stats[i].Value
 	}
-	i := 0
-	stat = make([]info.PerDiskStats, len(disk_stat))
-	for _, disk := range disk_stat {
-		stat[i] = *disk
-		i++
-	}
-	return
+	return DiskStatsCopy1(disk_stat)
 }
 
 // Convert libcontainer stats to info.ContainerStats.
+func toContainerStats0(s *cgroups.Stats, ret *info.ContainerStats) {
+	ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
+	ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
+	n := len(s.CpuStats.CpuUsage.PercpuUsage)
+	ret.Cpu.Usage.PerCpu = make([]uint64, n)
+
+	ret.Cpu.Usage.Total = 0
+	for i := 0; i < n; i++ {
+		ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
+		ret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]
+	}
+}
+
+func toContainerStats1(s *cgroups.Stats, ret *info.ContainerStats) {
+	ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
+	ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
+	ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
+	ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
+	ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
+	ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
+	ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
+	ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)
+}
+
+func toContainerStats2(s *cgroups.Stats, ret *info.ContainerStats) {
+	ret.Memory.Usage = s.MemoryStats.Usage
+	if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
+		ret.Memory.ContainerData.Pgfault = v
+		ret.Memory.HierarchicalData.Pgfault = v
+	}
+	if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
+		ret.Memory.ContainerData.Pgmajfault = v
+		ret.Memory.HierarchicalData.Pgmajfault = v
+	}
+	if v, ok := s.MemoryStats.Stats["total_inactive_anon"]; ok {
+		ret.Memory.WorkingSet = ret.Memory.Usage - v
+		if v, ok := s.MemoryStats.Stats["total_active_file"]; ok {
+			ret.Memory.WorkingSet -= v
+		}
+	}
+}
+
+func toContainerStats3(libcontainerStats *libcontainer.Stats, ret *info.ContainerStats) {
+	// TODO(vmarmol): Handle multiple interfaces.
+	ret.Network.RxBytes = libcontainerStats.Interfaces[0].RxBytes
+	ret.Network.RxPackets = libcontainerStats.Interfaces[0].RxPackets
+	ret.Network.RxErrors = libcontainerStats.Interfaces[0].RxErrors
+	ret.Network.RxDropped = libcontainerStats.Interfaces[0].RxDropped
+	ret.Network.TxBytes = libcontainerStats.Interfaces[0].TxBytes
+	ret.Network.TxPackets = libcontainerStats.Interfaces[0].TxPackets
+	ret.Network.TxErrors = libcontainerStats.Interfaces[0].TxErrors
+	ret.Network.TxDropped = libcontainerStats.Interfaces[0].TxDropped
+}
+
 func toContainerStats(libcontainerStats *libcontainer.Stats) *info.ContainerStats {
 	s := libcontainerStats.CgroupStats
 	ret := new(info.ContainerStats)
 	ret.Timestamp = time.Now()
 
 	if s != nil {
-		ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
-		ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
-		n := len(s.CpuStats.CpuUsage.PercpuUsage)
-		ret.Cpu.Usage.PerCpu = make([]uint64, n)
-
-		ret.Cpu.Usage.Total = 0
-		for i := 0; i < n; i++ {
-			ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
-			ret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]
-		}
-
-		ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
-		ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
-		ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
-		ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
-		ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
-		ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
-		ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
-		ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)
-
-		ret.Memory.Usage = s.MemoryStats.Usage
-		if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
-			ret.Memory.ContainerData.Pgfault = v
-			ret.Memory.HierarchicalData.Pgfault = v
-		}
-		if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
-			ret.Memory.ContainerData.Pgmajfault = v
-			ret.Memory.HierarchicalData.Pgmajfault = v
-		}
-		if v, ok := s.MemoryStats.Stats["total_inactive_anon"]; ok {
-			ret.Memory.WorkingSet = ret.Memory.Usage - v
-			if v, ok := s.MemoryStats.Stats["total_active_file"]; ok {
-				ret.Memory.WorkingSet -= v
-			}
-		}
+		toContainerStats0(s, ret)
+		toContainerStats1(s, ret)
+		toContainerStats2(s, ret)
 	}
 	if len(libcontainerStats.Interfaces) > 0 {
-		// TODO(vmarmol): Handle multiple interfaces.
-		ret.Network.RxBytes = libcontainerStats.Interfaces[0].RxBytes
-		ret.Network.RxPackets = libcontainerStats.Interfaces[0].RxPackets
-		ret.Network.RxErrors = libcontainerStats.Interfaces[0].RxErrors
-		ret.Network.RxDropped = libcontainerStats.Interfaces[0].RxDropped
-		ret.Network.TxBytes = libcontainerStats.Interfaces[0].TxBytes
-		ret.Network.TxPackets = libcontainerStats.Interfaces[0].TxPackets
-		ret.Network.TxErrors = libcontainerStats.Interfaces[0].TxErrors
-		ret.Network.TxDropped = libcontainerStats.Interfaces[0].TxDropped
+		toContainerStats3(libcontainerStats, ret)
 	}
 	return ret
 }
@@ -15,6 +15,7 @@
 package raw
 
 import (
+	"flag"
 	"fmt"
 
 	"github.com/golang/glog"
@@ -24,6 +25,8 @@ import (
 	info "github.com/google/cadvisor/info/v1"
 )
 
+var dockerOnly = flag.Bool("docker_only", false, "Only report docker containers in addition to root stats")
+
 type rawFactory struct {
 	// Factory for machine information.
 	machineInfoFactory info.MachineInfoFactory
@@ -43,9 +46,10 @@ func (self *rawFactory) NewContainerHandler(name string) (container.ContainerHan
 	return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo)
 }
 
-// The raw factory can handle any container.
-func (self *rawFactory) CanHandle(name string) (bool, error) {
-	return true, nil
+// The raw factory can handle any container. If --docker_only is set, non-docker containers are ignored except for the root.
+func (self *rawFactory) CanHandleAndAccept(name string) (bool, bool, error) {
+	accept := name == "/" || !*dockerOnly
+	return true, accept, nil
 }
 
 func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
@@ -24,7 +24,6 @@ import (
 	"strings"
 	"time"
 
-	"code.google.com/p/go.exp/inotify"
 	"github.com/docker/libcontainer/cgroups"
 	cgroup_fs "github.com/docker/libcontainer/cgroups/fs"
 	"github.com/docker/libcontainer/configs"

@@ -34,6 +33,7 @@ import (
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/utils"
+	"golang.org/x/exp/inotify"
 )
 
 type rawContainerHandler struct {
@@ -23,20 +23,21 @@ import (
 
 	"github.com/golang/glog"
 	info "github.com/google/cadvisor/info/v1"
+	"github.com/google/cadvisor/utils"
 )
 
-type EventSlice []*info.Event
+type byTimestamp []*info.Event
 
 // functions necessary to implement the sort interface on the Events struct
-func (e EventSlice) Len() int {
+func (e byTimestamp) Len() int {
 	return len(e)
 }
 
-func (e EventSlice) Swap(i, j int) {
+func (e byTimestamp) Swap(i, j int) {
 	e[i], e[j] = e[j], e[i]
 }
 
-func (e EventSlice) Less(i, j int) bool {
+func (e byTimestamp) Less(i, j int) bool {
 	return e[i].Timestamp.Before(e[j].Timestamp)
 }
 
@@ -79,7 +80,7 @@ type EventManager interface {
 	// On successful registration, an EventChannel object is returned.
 	WatchEvents(request *Request) (*EventChannel, error)
 	// GetEvents() returns all detected events based on the filters specified in request.
-	GetEvents(request *Request) (EventSlice, error)
+	GetEvents(request *Request) ([]*info.Event, error)
 	// AddEvent allows the caller to add an event to an EventManager
 	// object
 	AddEvent(e *info.Event) error
@@ -89,17 +90,18 @@ type EventManager interface {
 
 // events provides an implementation for the EventManager interface.
 type events struct {
-	// eventList holds the complete set of events found over an
-	// EventManager events instantiation.
-	eventList EventSlice
+	// eventStore holds the events by event type.
+	eventStore map[info.EventType]*utils.TimedStore
 	// map of registered watchers keyed by watch id.
 	watchers map[int]*watch
-	// lock guarding the eventList.
+	// lock guarding the eventStore.
 	eventsLock sync.RWMutex
 	// lock guarding watchers.
 	watcherLock sync.RWMutex
 	// last allocated watch id.
 	lastId int
+	// Event storage policy.
+	storagePolicy StoragePolicy
 }
 
 // initialized by a call to WatchEvents(), a watch struct will then be added
@@ -125,11 +127,34 @@ func NewEventChannel(watchId int) *EventChannel {
 	}
 }
 
-// returns a pointer to an initialized Events object
-func NewEventManager() *events {
+// Policy specifying how many events to store.
+// MaxAge is the max duration for which to keep events.
+// MaxNumEvents is the max number of events to keep (-1 for no limit).
+type StoragePolicy struct {
+	// Default limits, used if a per-event limit is not set.
+	DefaultMaxAge       time.Duration
+	DefaultMaxNumEvents int
+
+	// Per-event type limits.
+	PerTypeMaxAge       map[info.EventType]time.Duration
+	PerTypeMaxNumEvents map[info.EventType]int
+}
+
+func DefaultStoragePolicy() StoragePolicy {
+	return StoragePolicy{
+		DefaultMaxAge:       24 * time.Hour,
+		DefaultMaxNumEvents: 100000,
+		PerTypeMaxAge:       make(map[info.EventType]time.Duration),
+		PerTypeMaxNumEvents: make(map[info.EventType]int),
+	}
+}
+
+// returns a pointer to an initialized Events object.
+func NewEventManager(storagePolicy StoragePolicy) *events {
 	return &events{
-		eventList: make(EventSlice, 0),
-		watchers:  make(map[int]*watch),
+		eventStore:    make(map[info.EventType]*utils.TimedStore, 0),
+		watchers:      make(map[int]*watch),
+		storagePolicy: storagePolicy,
 	}
 }
 
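A short usage sketch of the policy type introduced above (illustrative, not from the commit; assumes the events and info packages from this diff plus the time package):

// Keep defaults for everything, but cap OOM events at one hour / 1000 entries.
policy := events.DefaultStoragePolicy()
policy.PerTypeMaxAge[info.EventOom] = time.Hour
policy.PerTypeMaxNumEvents[info.EventOom] = 1000
eventManager := events.NewEventManager(policy)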
@@ -159,13 +184,13 @@ func (self *EventChannel) GetWatchId() int {
 }
 
 // sorts and returns up to the last MaxEventsReturned chronological elements
-func getMaxEventsReturned(request *Request, eSlice EventSlice) EventSlice {
-	sort.Sort(eSlice)
+func getMaxEventsReturned(request *Request, eSlice []*info.Event) []*info.Event {
+	sort.Sort(byTimestamp(eSlice))
 	n := request.MaxEventsReturned
-	if n >= eSlice.Len() || n <= 0 {
+	if n >= len(eSlice) || n <= 0 {
 		return eSlice
 	}
-	return eSlice[eSlice.Len()-n:]
+	return eSlice[len(eSlice)-n:]
 }
 
 // If the request wants all subcontainers, this returns if the request's
@@ -194,7 +219,7 @@ func checkIfEventSatisfiesRequest(request *Request, event *info.Event) bool {
 			return false
 		}
 	}
-	if request.EventType[event.EventType] != true {
+	if !request.EventType[event.EventType] {
 		return false
 	}
 	if request.ContainerName != "" {
@@ -203,18 +228,30 @@ func checkIfEventSatisfiesRequest(request *Request, event *info.Event) bool {
 	return true
 }
 
-// method of Events object that screens Event objects found in the eventList
+// method of Events object that screens Event objects found in the eventStore
 // attribute and if they fit the parameters passed by the Request object,
 // adds it to a slice of *Event objects that is returned. If both MaxEventsReturned
 // and StartTime/EndTime are specified in the request object, then only
 // up to the most recent MaxEventsReturned events in that time range are returned.
-func (self *events) GetEvents(request *Request) (EventSlice, error) {
-	returnEventList := EventSlice{}
+func (self *events) GetEvents(request *Request) ([]*info.Event, error) {
+	returnEventList := []*info.Event{}
 	self.eventsLock.RLock()
 	defer self.eventsLock.RUnlock()
-	for _, e := range self.eventList {
-		if checkIfEventSatisfiesRequest(request, e) {
-			returnEventList = append(returnEventList, e)
+	for eventType, fetch := range request.EventType {
+		if !fetch {
+			continue
+		}
+		evs, ok := self.eventStore[eventType]
+		if !ok {
+			continue
+		}
+
+		res := evs.InTimeRange(request.StartTime, request.EndTime, request.MaxEventsReturned)
+		for _, in := range res {
+			e := in.(*info.Event)
+			if checkIfEventSatisfiesRequest(request, e) {
+				returnEventList = append(returnEventList, e)
+			}
 		}
 	}
 	returnEventList = getMaxEventsReturned(request, returnEventList)
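Since GetEvents now walks the per-type stores keyed off request.EventType, a caller opts into each type explicitly. A hedged sketch (it assumes NewRequest() initializes the EventType map, as the handler code earlier in this diff implies):

// Fetch up to the 10 most recent OOM events.
req := events.NewRequest()
req.EventType[info.EventOom] = true
req.MaxEventsReturned = 10
oomEvents, err := eventManager.GetEvents(req)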
@@ -241,11 +278,23 @@ func (self *events) WatchEvents(request *Request) (*EventChannel, error) {
 	return returnEventChannel, nil
 }
 
-// helper function to update the event manager's eventList
-func (self *events) updateEventList(e *info.Event) {
+// helper function to update the event manager's eventStore
+func (self *events) updateEventStore(e *info.Event) {
 	self.eventsLock.Lock()
 	defer self.eventsLock.Unlock()
-	self.eventList = append(self.eventList, e)
+	if _, ok := self.eventStore[e.EventType]; !ok {
+		maxAge := self.storagePolicy.DefaultMaxAge
+		maxNumEvents := self.storagePolicy.DefaultMaxNumEvents
+		if age, ok := self.storagePolicy.PerTypeMaxAge[e.EventType]; ok {
+			maxAge = age
+		}
+		if numEvents, ok := self.storagePolicy.PerTypeMaxNumEvents[e.EventType]; ok {
+			maxNumEvents = numEvents
+		}
+
+		self.eventStore[e.EventType] = utils.NewTimedStore(maxAge, maxNumEvents)
+	}
+	self.eventStore[e.EventType].Add(e.Timestamp, e)
 }
 
 func (self *events) findValidWatchers(e *info.Event) []*watch {
@@ -260,10 +309,10 @@ func (self *events) findValidWatchers(e *info.Event) []*watch {
 }
 
 // method of Events object that adds the argument Event object to the
-// eventList. It also feeds the event to a set of watch channels
+// eventStore. It also feeds the event to a set of watch channels
 // held by the manager if it satisfies the request keys of the channels
 func (self *events) AddEvent(e *info.Event) error {
-	self.updateEventList(e)
+	self.updateEventStore(e)
 	self.watcherLock.RLock()
 	defer self.watcherLock.RUnlock()
 	watchesToSend := self.findValidWatchers(e)
@@ -47,7 +47,7 @@ func initializeScenario(t *testing.T) (*events, *Request, *info.Event, *info.Eve
 	fakeEvent := makeEvent(createOldTime(t), "/")
 	fakeEvent2 := makeEvent(time.Now(), "/")
 
-	return NewEventManager(), NewRequest(), fakeEvent, fakeEvent2
+	return NewEventManager(DefaultStoragePolicy()), NewRequest(), fakeEvent, fakeEvent2
 }
 
 func checkNumberOfEvents(t *testing.T, numEventsExpected int, numEventsReceived int) {
@@ -150,8 +150,8 @@ func TestAddEventAddsEventsToEventManager(t *testing.T) {
 
 	myEventHolder.AddEvent(fakeEvent)
 
-	checkNumberOfEvents(t, 1, myEventHolder.eventList.Len())
-	ensureProperEventReturned(t, fakeEvent, myEventHolder.eventList[0])
+	checkNumberOfEvents(t, 1, len(myEventHolder.eventStore))
+	ensureProperEventReturned(t, fakeEvent, myEventHolder.eventStore[info.EventOom].Get(0).(*info.Event))
 }
 
 func TestGetEventsForOneEvent(t *testing.T) {
@@ -164,7 +164,7 @@ func TestGetEventsForOneEvent(t *testing.T) {
 
 	receivedEvents, err := myEventHolder.GetEvents(myRequest)
 	assert.Nil(t, err)
-	checkNumberOfEvents(t, 1, receivedEvents.Len())
+	checkNumberOfEvents(t, 1, len(receivedEvents))
 	ensureProperEventReturned(t, fakeEvent2, receivedEvents[0])
 }
 
@@ -180,7 +180,7 @@ func TestGetEventsForTimePeriod(t *testing.T) {
 	receivedEvents, err := myEventHolder.GetEvents(myRequest)
 	assert.Nil(t, err)
 
-	checkNumberOfEvents(t, 1, receivedEvents.Len())
+	checkNumberOfEvents(t, 1, len(receivedEvents))
 	ensureProperEventReturned(t, fakeEvent, receivedEvents[0])
 }
 
@@ -192,5 +192,5 @@ func TestGetEventsForNoTypeRequested(t *testing.T) {
 
 	receivedEvents, err := myEventHolder.GetEvents(myRequest)
 	assert.Nil(t, err)
-	checkNumberOfEvents(t, 0, receivedEvents.Len())
+	checkNumberOfEvents(t, 0, len(receivedEvents))
 }
@@ -43,6 +43,9 @@ type ContainerSpec struct {
 	// Time at which the container was created.
 	CreationTime time.Time `json:"creation_time,omitempty"`
 
+	// Metadata labels associated with this container.
+	Labels map[string]string `json:"labels,omitempty"`
+
 	HasCpu bool    `json:"has_cpu"`
 	Cpu    CpuSpec `json:"cpu,omitempty"`
 
@@ -505,19 +508,10 @@ const (
 
 // Extra information about an event. Only one type will be set.
 type EventData struct {
-	// Information about a container creation event.
-	Created *CreatedEventData `json:"created,omitempty"`
-
 	// Information about an OOM kill event.
 	OomKill *OomKillEventData `json:"oom,omitempty"`
 }
 
-// Information related to a container creation event.
-type CreatedEventData struct {
-	// Spec of the container at creation.
-	Spec ContainerSpec `json:"spec"`
-}
-
 // Information related to an OOM kill instance
 type OomKillEventData struct {
 	// process id of the killed process
@@ -64,6 +64,9 @@ type ContainerSpec struct {
 	// An example of a namespace is "docker" for Docker containers.
 	Namespace string `json:"namespace,omitempty"`
 
+	// Metadata labels associated with this container.
+	Labels map[string]string `json:"labels,omitempty"`
+
 	HasCpu bool    `json:"has_cpu"`
 	Cpu    CpuSpec `json:"cpu,omitempty"`
 
@@ -24,6 +24,7 @@ import (
 
 	"github.com/docker/docker/pkg/units"
 	"github.com/golang/glog"
+	"github.com/google/cadvisor/collector"
 	"github.com/google/cadvisor/container"
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/info/v2"
@@ -63,6 +64,9 @@ type containerData struct {
 
 	// Tells the container to stop.
 	stop chan bool
+
+	// Runs custom metric collectors.
+	collectorManager collector.CollectorManager
 }
 
 func (c *containerData) Start() error {
@@ -109,7 +113,7 @@ func (c *containerData) DerivedStats() (v2.DerivedStats, error) {
 	return c.summaryReader.DerivedStats()
 }
 
-func newContainerData(containerName string, memoryStorage *memory.InMemoryStorage, handler container.ContainerHandler, loadReader cpuload.CpuLoadReader, logUsage bool) (*containerData, error) {
+func newContainerData(containerName string, memoryStorage *memory.InMemoryStorage, handler container.ContainerHandler, loadReader cpuload.CpuLoadReader, logUsage bool, collectorManager collector.CollectorManager) (*containerData, error) {
 	if memoryStorage == nil {
 		return nil, fmt.Errorf("nil memory storage")
 	}
@@ -129,6 +133,7 @@ func newContainerData(containerName string, memoryStorage *memory.InMemoryStorag
 		logUsage:             logUsage,
 		loadAvg:              -1.0, // negative value indicates uninitialized.
 		stop:                 make(chan bool, 1),
+		collectorManager:     collectorManager,
 	}
 	cont.info.ContainerReference = ref
 
@@ -172,6 +177,7 @@ func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Tim
 	return lastHousekeeping.Add(self.housekeepingInterval)
 }
 
+// TODO(vmarmol): Implement stats collecting as a custom collector.
 func (c *containerData) housekeeping() {
 	// Long housekeeping is either 100ms or half of the housekeeping interval.
 	longHousekeeping := 100 * time.Millisecond
@@ -226,12 +232,24 @@ func (c *containerData) housekeeping() {
 			}
 		}
 
-		// Schedule the next housekeeping. Sleep until that time.
-		nextHousekeeping := c.nextHousekeeping(lastHousekeeping)
-		if time.Now().Before(nextHousekeeping) {
-			time.Sleep(nextHousekeeping.Sub(time.Now()))
+		// Run custom collectors.
+		nextCollectionTime, err := c.collectorManager.Collect()
+		if err != nil && c.allowErrorLogging() {
+			glog.Warningf("[%s] Collection failed: %v", c.info.Name, err)
 		}
-		lastHousekeeping = nextHousekeeping
+
+		// Next housekeeping is the first of the stats or the custom collector's housekeeping.
+		nextHousekeeping := c.nextHousekeeping(lastHousekeeping)
+		next := nextHousekeeping
+		if !nextCollectionTime.IsZero() && nextCollectionTime.Before(nextHousekeeping) {
+			next = nextCollectionTime
+		}
+
+		// Schedule the next housekeeping. Sleep until that time.
+		if time.Now().Before(next) {
+			time.Sleep(next.Sub(time.Now()))
+		}
+		lastHousekeeping = next
 	}
 }
 
@@ -302,7 +320,7 @@ func (c *containerData) updateStats() error {
 		err := c.summaryReader.AddSample(*stats)
 		if err != nil {
 			// Ignore summary errors for now.
-			glog.V(2).Infof("failed to add summary stats for %q: %v", c.info.Name, err)
+			glog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err)
 		}
 	}
 	ref, err := c.handler.ContainerReference()
@@ -22,6 +22,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/cadvisor/collector"
 	"github.com/google/cadvisor/container"
 	info "github.com/google/cadvisor/info/v1"
 	itest "github.com/google/cadvisor/info/v1/test"
@@ -40,7 +41,7 @@ func setupContainerData(t *testing.T, spec info.ContainerSpec) (*containerData,
 		nil,
 	)
 	memoryStorage := memory.New(60, nil)
-	ret, err := newContainerData(containerName, memoryStorage, mockHandler, nil, false)
+	ret, err := newContainerData(containerName, memoryStorage, mockHandler, nil, false, &collector.FakeCollectorManager{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -20,12 +20,14 @@ import (
 	"fmt"
 	"path"
 	"regexp"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/golang/glog"
+	"github.com/google/cadvisor/collector"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/docker"
 	"github.com/google/cadvisor/container/raw"
@@ -42,6 +44,8 @@ import (
 var globalHousekeepingInterval = flag.Duration("global_housekeeping_interval", 1*time.Minute, "Interval between global housekeepings")
 var logCadvisorUsage = flag.Bool("log_cadvisor_usage", false, "Whether to log the usage of the cAdvisor container")
 var enableLoadReader = flag.Bool("enable_load_reader", false, "Whether to enable cpu load reader")
+var eventStorageAgeLimit = flag.String("event_storage_age_limit", "default=24h", "Max length of time for which to store events (per type). Value is a comma separated list of key values, where the keys are event types (e.g.: creation, oom) or \"default\" and the value is a duration. Default is applied to all non-specified event types")
+var eventStorageEventLimit = flag.String("event_storage_event_limit", "default=100000", "Max number of events to store (per type). Value is a comma separated list of key values, where the keys are event types (e.g.: creation, oom) or \"default\" and the value is an integer. Default is applied to all non-specified event types")
 
 // The Manager interface defines operations for starting a manager and getting
 // container and machine information.
@@ -73,6 +77,9 @@ type Manager interface {
 	// Get info for all requested containers based on the request options.
 	GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error)
 
+	// Returns true if the named container exists.
+	Exists(containerName string) bool
+
 	// Get information about the machine.
 	GetMachineInfo() (*info.MachineInfo, error)
 
@@ -133,7 +140,7 @@ func New(memoryStorage *memory.InMemoryStorage, sysfs sysfs.SysFs) (Manager, err
 	newManager.versionInfo = *versionInfo
 	glog.Infof("Version: %+v", newManager.versionInfo)
 
-	newManager.eventHandler = events.NewEventManager()
+	newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy())
 
 	// Register Docker container factory.
 	err = docker.Register(newManager, fsInfo)
@@ -618,14 +625,39 @@ func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
 	return &m.versionInfo, nil
 }
 
+func (m *manager) Exists(containerName string) bool {
+	m.containersLock.Lock()
+	defer m.containersLock.Unlock()
+
+	namespacedName := namespacedContainerName{
+		Name: containerName,
+	}
+
+	_, ok := m.containers[namespacedName]
+	if ok {
+		return true
+	}
+	return false
+}
+
 // Create a container.
 func (m *manager) createContainer(containerName string) error {
-	handler, err := container.NewContainerHandler(containerName)
+	handler, accept, err := container.NewContainerHandler(containerName)
 	if err != nil {
 		return err
 	}
+	if !accept {
+		// ignoring this container.
+		glog.V(4).Infof("ignoring container %q", containerName)
+		return nil
+	}
+	// TODO(vmarmol): Register collectors.
+	collectorManager, err := collector.NewCollectorManager()
+	if err != nil {
+		return err
+	}
 	logUsage := *logCadvisorUsage && containerName == m.cadvisorContainer
-	cont, err := newContainerData(containerName, m.memoryStorage, handler, m.loadReader, logUsage)
+	cont, err := newContainerData(containerName, m.memoryStorage, handler, m.loadReader, logUsage, collectorManager)
 	if err != nil {
 		return err
 	}
@@ -659,33 +691,26 @@ func (m *manager) createContainer(containerName string) error {
 	if alreadyExists {
 		return nil
 	}
-	glog.V(2).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
+	glog.V(3).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
 
 	contSpec, err := cont.handler.GetSpec()
 	if err != nil {
 		return err
 	}
 
-	if contSpec.CreationTime.After(m.startupTime) {
-		contRef, err := cont.handler.ContainerReference()
-		if err != nil {
-			return err
-		}
-
-		newEvent := &info.Event{
-			ContainerName: contRef.Name,
-			Timestamp:     contSpec.CreationTime,
-			EventType:     info.EventContainerCreation,
-			EventData: info.EventData{
-				Created: &info.CreatedEventData{
-					Spec: contSpec,
-				},
-			},
-		}
-		err = m.eventHandler.AddEvent(newEvent)
-		if err != nil {
-			return err
-		}
+	contRef, err := cont.handler.ContainerReference()
+	if err != nil {
+		return err
+	}
+
+	newEvent := &info.Event{
+		ContainerName: contRef.Name,
+		Timestamp:     contSpec.CreationTime,
+		EventType:     info.EventContainerCreation,
+	}
+	err = m.eventHandler.AddEvent(newEvent)
+	if err != nil {
+		return err
 	}
 
 	// Start the container's housekeeping.
@@ -721,7 +746,7 @@ func (m *manager) destroyContainer(containerName string) error {
 			Name: alias,
 		})
 	}
-	glog.V(2).Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
+	glog.V(3).Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
 
 	contRef, err := cont.handler.ContainerReference()
 	if err != nil {
@@ -925,3 +950,50 @@ func (self *manager) GetPastEvents(request *events.Request) ([]*info.Event, error) {
 func (self *manager) CloseEventChannel(watch_id int) {
 	self.eventHandler.StopWatch(watch_id)
 }
+
+// Parses the events StoragePolicy from the flags.
+func parseEventsStoragePolicy() events.StoragePolicy {
+	policy := events.DefaultStoragePolicy()
+
+	// Parse max age.
+	parts := strings.Split(*eventStorageAgeLimit, ",")
+	for _, part := range parts {
+		items := strings.Split(part, "=")
+		if len(items) != 2 {
+			glog.Warningf("Unknown event storage policy %q when parsing max age", part)
+			continue
+		}
+		dur, err := time.ParseDuration(items[1])
+		if err != nil {
+			glog.Warningf("Unable to parse event max age duration %q: %v", items[1], err)
+			continue
+		}
+		if items[0] == "default" {
+			policy.DefaultMaxAge = dur
+			continue
+		}
+		policy.PerTypeMaxAge[info.EventType(items[0])] = dur
+	}
+
+	// Parse max number.
+	parts = strings.Split(*eventStorageEventLimit, ",")
+	for _, part := range parts {
+		items := strings.Split(part, "=")
+		if len(items) != 2 {
+			glog.Warningf("Unknown event storage policy %q when parsing max event limit", part)
+			continue
+		}
+		val, err := strconv.Atoi(items[1])
+		if err != nil {
+			glog.Warningf("Unable to parse integer from %q: %v", items[1], err)
+			continue
+		}
+		if items[0] == "default" {
+			policy.DefaultMaxNumEvents = val
+			continue
+		}
+		policy.PerTypeMaxNumEvents[info.EventType(items[0])] = val
+	}
+
+	return policy
+}
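The policy string consumed above is a comma-separated list of key=value entries, where the key is either "default" or an event type name and the value is a Go duration (age limit) or an integer (event count limit). A minimal self-contained sketch of the same parsing scheme; the sample values are assumptions for illustration, not taken from the diff:

package main

import (
	"fmt"
	"strings"
	"time"
)

// Sketch of the "default=24h,oom=48h" syntax parsed by the new
// parseEventsStoragePolicy; malformed entries are skipped, mirroring
// the Warningf+continue pattern above.
func parseAgeLimits(spec string) (time.Duration, map[string]time.Duration) {
	var def time.Duration
	perType := map[string]time.Duration{}
	for _, part := range strings.Split(spec, ",") {
		items := strings.Split(part, "=")
		if len(items) != 2 {
			continue
		}
		dur, err := time.ParseDuration(items[1])
		if err != nil {
			continue
		}
		if items[0] == "default" {
			def = dur
			continue
		}
		perType[items[0]] = dur
	}
	return def, perType
}

func main() {
	def, perType := parseAgeLimits("default=24h,oom=48h")
	fmt.Println(def, perType) // 24h0m0s map[oom:48h0m0s]
}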
@@ -70,6 +70,11 @@ func (c *ManagerMock) GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error) {
 	return args.Get(0).(map[string]*info.ContainerInfo), args.Error(1)
 }

+func (c *ManagerMock) Exists(name string) bool {
+	args := c.Called(name)
+	return args.Get(0).(bool)
+}
+
 func (c *ManagerMock) WatchForEvents(queryuest *events.Request, passedChannel chan *info.Event) error {
 	args := c.Called(queryuest, passedChannel)
 	return args.Error(0)
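The Called/args pattern above is the github.com/stretchr/testify/mock API. A minimal sketch of how the newly added Exists stub might be driven in a test; the mock type is a local stand-in and the container name is hypothetical:

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// Stand-in mirroring the vendored ManagerMock's new Exists method.
type managerMock struct {
	mock.Mock
}

func (c *managerMock) Exists(name string) bool {
	args := c.Called(name)
	return args.Get(0).(bool)
}

func main() {
	m := new(managerMock)
	m.On("Exists", "/docker/abc123").Return(true)
	// The pages handler can now ask the manager whether a subcontainer
	// still exists before linking to it.
	fmt.Println(m.Exists("/docker/abc123")) // true
}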
@@ -22,6 +22,7 @@ import (
 	"testing"
 	"time"

+	"github.com/google/cadvisor/collector"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/docker"
 	info "github.com/google/cadvisor/info/v1"

@@ -52,7 +53,7 @@ func createManagerAndAddContainers(
 		spec,
 		nil,
 	).Once()
-	cont, err := newContainerData(name, memoryStorage, mockHandler, nil, false)
+	cont, err := newContainerData(name, memoryStorage, mockHandler, nil, false, &collector.FakeCollectorManager{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -195,12 +195,14 @@ func serveContainersPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error {
 		return err
 	}

+	rootDir := getRootDir(containerName)
+
 	// Make a list of the parent containers and their links
 	pathParts := strings.Split(string(cont.Name), "/")
 	parentContainers := make([]link, 0, len(pathParts))
 	parentContainers = append(parentContainers, link{
 		Text: "root",
-		Link: ContainersPage,
+		Link: path.Join(rootDir, ContainersPage),
 	})
 	for i := 1; i < len(pathParts); i++ {
 		// Skip empty parts.

@@ -209,16 +211,19 @@ func serveContainersPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error {
 		}
 		parentContainers = append(parentContainers, link{
 			Text: pathParts[i],
-			Link: path.Join(ContainersPage, path.Join(pathParts[1:i+1]...)),
+			Link: path.Join(rootDir, ContainersPage, path.Join(pathParts[1:i+1]...)),
 		})
 	}

 	// Build the links for the subcontainers.
 	subcontainerLinks := make([]link, 0, len(cont.Subcontainers))
 	for _, sub := range cont.Subcontainers {
+		if !m.Exists(sub.Name) {
+			continue
+		}
 		subcontainerLinks = append(subcontainerLinks, link{
 			Text: getContainerDisplayName(sub),
-			Link: path.Join(ContainersPage, sub.Name),
+			Link: path.Join(rootDir, ContainersPage, sub.Name),
 		})
 	}

@@ -236,6 +241,7 @@ func serveContainersPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error {
 		MemoryAvailable:  cont.Spec.HasMemory,
 		NetworkAvailable: cont.Spec.HasNetwork,
 		FsAvailable:      cont.Spec.HasFilesystem,
+		Root:             rootDir,
 	}
 	err = pageTemplate.Execute(w, data)
 	if err != nil {

@@ -245,3 +251,10 @@ func serveContainersPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error {
 	glog.V(5).Infof("Request took %s", time.Since(start))
 	return nil
 }
+
+// Build a relative path to the root of the container page.
+func getRootDir(containerName string) string {
+	// The root is at: container depth
+	levels := (strings.Count(containerName, "/"))
+	return strings.Repeat("../", levels)
+}
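A standalone sketch of the getRootDir arithmetic: each slash in the container name places the rendered page one directory deeper, so the returned prefix climbs back up to the cAdvisor root. The sample names are illustrative:

package main

import (
	"fmt"
	"strings"
)

// Mirror of the getRootDir helper added above, for illustration.
func getRootDir(containerName string) string {
	levels := strings.Count(containerName, "/")
	return strings.Repeat("../", levels)
}

func main() {
	fmt.Println(getRootDir("/"))              // "../"
	fmt.Println(getRootDir("/docker"))        // "../"    (one slash)
	fmt.Println(getRootDir("/docker/abc123")) // "../../" (two slashes)
}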
@@ -19,23 +19,23 @@ const containersHtmlTemplate = `
 <head>
 	<title>cAdvisor - {{.DisplayName}}</title>
 	<!-- Latest compiled and minified CSS -->
-	<link rel="stylesheet" href="/static/bootstrap-3.1.1.min.css">
+	<link rel="stylesheet" href="{{.Root}}static/bootstrap-3.1.1.min.css">

 	<!-- Optional theme -->
-	<link rel="stylesheet" href="/static/bootstrap-theme-3.1.1.min.css">
+	<link rel="stylesheet" href="{{.Root}}static/bootstrap-theme-3.1.1.min.css">

-	<link rel="stylesheet" href="/static/containers.css">
+	<link rel="stylesheet" href="{{.Root}}static/containers.css">

 	<!-- Latest compiled and minified JavaScript -->
-	<script src="/static/jquery-1.10.2.min.js"></script>
-	<script src="/static/bootstrap-3.1.1.min.js"></script>
-	<script type="text/javascript" src="/static/google-jsapi.js"></script>
+	<script src="{{.Root}}static/jquery-1.10.2.min.js"></script>
+	<script src="{{.Root}}static/bootstrap-3.1.1.min.js"></script>
+	<script type="text/javascript" src="{{.Root}}static/google-jsapi.js"></script>

-	<script type="text/javascript" src="/static/containers.js"></script>
+	<script type="text/javascript" src="{{.Root}}static/containers.js"></script>
 </head>
 <body>
 <div class="container theme-showcase" >
-	<a href="/" class="col-sm-12" id="logo">
+	<a href="{{.Root}}" class="col-sm-12" id="logo">
 	</a>
 	<div class="col-sm-12">
 		<div class="page-header">

@@ -49,7 +49,7 @@ const containersHtmlTemplate = `
 		</div>
 		{{if .IsRoot}}
 		<div class="col-sm-12">
-			<h4><a href="/docker">Docker Containers</a></h4>
+			<h4><a href="../docker">Docker Containers</a></h4>
 		</div>
 		{{end}}
 		{{if .Subcontainers}}

@@ -179,7 +179,7 @@ const containersHtmlTemplate = `
 	{{end}}
 </div>
 <script type="text/javascript">
-	startPage({{.ContainerName}}, {{.CpuAvailable}}, {{.MemoryAvailable}});
+	startPage({{.ContainerName}}, {{.CpuAvailable}}, {{.MemoryAvailable}}, {{.Root}});
 </script>
 </body>
 </html>
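A small rendering sketch of what the {{.Root}} substitution produces, using Go's html/template with an assumed Root of "../../" (a page two levels deep). Absolute "/static/..." links only worked when cAdvisor was served at the site root; the relative prefix also works behind a path-prefixing proxy:

package main

import (
	"html/template"
	"os"
)

func main() {
	t := template.Must(template.New("head").Parse(
		`<link rel="stylesheet" href="{{.Root}}static/containers.css">`))
	// Prints: <link rel="stylesheet" href="../../static/containers.css">
	t.Execute(os.Stdout, struct{ Root string }{Root: "../../"})
}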
@@ -34,6 +34,7 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error {

 	// The container name is the path after the handler
 	containerName := u.Path[len(DockerPage):]
+	rootDir := getRootDir(u.Path)

 	var data *pageData
 	if containerName == "" {

@@ -62,6 +63,7 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error {
 				Link: DockerPage,
 			}},
 			Subcontainers: subcontainers,
+			Root:          rootDir,
 		}
 	} else {
 		// Get the container.

@@ -103,6 +105,7 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error {
 			MemoryAvailable:  cont.Spec.HasMemory,
 			NetworkAvailable: cont.Spec.HasNetwork,
 			FsAvailable:      cont.Spec.HasFilesystem,
+			Root:             rootDir,
 		}
 	}
@@ -51,6 +51,7 @@ type pageData struct {
 	MemoryAvailable  bool
 	NetworkAvailable bool
 	FsAvailable      bool
+	Root             string
 }

 func init() {
Godeps/_workspace/src/github.com/google/cadvisor/pages/static/containers_js.go (generated, vendored): 14 lines changed
@@ -1518,21 +1518,21 @@ function drawGauges(elementId, gauges) {
 }

 // Get the machine info.
-function getMachineInfo(callback) {
-	$.getJSON("/api/v1.0/machine", function(data) {
+function getMachineInfo(rootDir, callback) {
+	$.getJSON(rootDir + "api/v1.0/machine", function(data) {
 		callback(data);
 	});
 }

 // Get the container stats for the specified container.
-function getStats(containerName, callback) {
+function getStats(rootDir, containerName, callback) {
 	// Request 60s of container history and no samples.
 	var request = JSON.stringify({
 		// Update main.statsRequestedByUI while updating "num_stats" here.
 		"num_stats": 60,
 		"num_samples": 0
 	});
-	$.post("/api/v1.0/containers" + containerName, request, function(data) {
+	$.post(rootDir + "api/v1.0/containers" + containerName, request, function(data) {
 		callback(data);
 	}, "json");
 }

@@ -1891,7 +1891,7 @@ function drawCharts(machineInfo, containerInfo) {
 }

 // Executed when the page finishes loading.
-function startPage(containerName, hasCpu, hasMemory) {
+function startPage(containerName, hasCpu, hasMemory, rootDir) {
 	// Don't fetch data if we don't have any resource.
 	if (!hasCpu && !hasMemory) {
 		return;

@@ -1902,9 +1902,9 @@ function startPage(containerName, hasCpu, hasMemory) {
 	window.cadvisor.firstRun = true;

 	// Get machine info, then get the stats every 1s.
-	getMachineInfo(function(machineInfo) {
+	getMachineInfo(rootDir, function(machineInfo) {
 		setInterval(function() {
-			getStats(containerName, function(containerInfo){
+			getStats(rootDir, containerName, function(containerInfo){
 				if (window.cadvisor.firstRun && containerInfo.spec.has_filesystem) {
 					window.cadvisor.firstRun = false;
 					startFileSystemUsage("filesystem-usage", machineInfo, containerInfo);
@@ -57,7 +57,7 @@ func (self *containerStorage) RecentStats(start, end time.Time, maxStats int) ([]*info.ContainerStats, error) {
 func newContainerStore(ref info.ContainerReference, maxAge time.Duration) *containerStorage {
 	return &containerStorage{
 		ref:         ref,
-		recentStats: utils.NewTimedStore(maxAge),
+		recentStats: utils.NewTimedStore(maxAge, -1),
 		maxAge:      maxAge,
 	}
 }
@@ -180,7 +180,7 @@ func trySystemd() (*OomParser, error) {
 	if err != nil {
 		return nil, err
 	}
-	glog.V(1).Infof("oomparser using systemd")
+	glog.Infof("oomparser using systemd")
 	return &OomParser{
 		ioreader: bufio.NewReader(readcloser),
 	}, nil
@@ -19,10 +19,12 @@ import (
 	"time"
 )

-// A time-based buffer for ContainerStats. Holds information for a specific time period.
+// A time-based buffer for ContainerStats.
+// Holds information for a specific time period and/or a max number of items.
 type TimedStore struct {
-	buffer []timedStoreData
-	age    time.Duration
+	buffer   []timedStoreData
+	age      time.Duration
+	maxItems int
 }

 type timedStoreData struct {

@@ -31,10 +33,12 @@ type timedStoreData struct {
 }

 // Returns a new thread-compatible TimedStore.
-func NewTimedStore(age time.Duration) *TimedStore {
+// A maxItems value of -1 means no limit.
+func NewTimedStore(age time.Duration, maxItems int) *TimedStore {
 	return &TimedStore{
-		buffer: make([]timedStoreData, 0),
-		age:    age,
+		buffer:   make([]timedStoreData, 0),
+		age:      age,
+		maxItems: maxItems,
 	}
 }

@@ -49,6 +53,12 @@ func (self *TimedStore) Add(timestamp time.Time, item interface{}) {
 		self.buffer = self.buffer[index:]
 	}

+	// Remove any elements if over our max size.
+	if self.maxItems >= 0 && (len(self.buffer)+1) > self.maxItems {
+		startIndex := len(self.buffer) + 1 - self.maxItems
+		self.buffer = self.buffer[startIndex:]
+	}
+
 	copied := item
 	self.buffer = append(self.buffer, timedStoreData{
 		timestamp: timestamp,
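A freestanding sketch of the eviction rule added to Add: before appending, enough of the oldest entries are dropped that the buffer never exceeds maxItems, and a negative maxItems disables the cap, matching the NewTimedStore(maxAge, -1) call in the memory storage hunk above:

package main

import "fmt"

// Same arithmetic as the new size check in TimedStore.Add, on a plain
// int slice for illustration.
func addCapped(buffer []int, item int, maxItems int) []int {
	if maxItems >= 0 && len(buffer)+1 > maxItems {
		buffer = buffer[len(buffer)+1-maxItems:]
	}
	return append(buffer, item)
}

func main() {
	var buf []int
	for i := 0; i <= 6; i++ {
		buf = addCapped(buf, i, 5)
	}
	fmt.Println(buf) // [2 3 4 5 6]: the oldest entries are evicted first
}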
@@ -55,7 +55,7 @@ func expectElements(t *testing.T, actual []interface{}, expected []int) {
 }

 func TestAdd(t *testing.T) {
-	sb := NewTimedStore(5 * time.Second)
+	sb := NewTimedStore(5*time.Second, 100)

 	// Add 1.
 	sb.Add(createTime(0), 0)

@@ -84,7 +84,7 @@ func TestAdd(t *testing.T) {
 }

 func TestGet(t *testing.T) {
-	sb := NewTimedStore(5 * time.Second)
+	sb := NewTimedStore(5*time.Second, -1)
 	sb.Add(createTime(1), 1)
 	sb.Add(createTime(2), 2)
 	sb.Add(createTime(3), 3)

@@ -97,7 +97,7 @@ func TestGet(t *testing.T) {
 }

 func TestInTimeRange(t *testing.T) {
-	sb := NewTimedStore(5 * time.Second)
+	sb := NewTimedStore(5*time.Second, -1)
 	assert := assert.New(t)

 	var empty time.Time

@@ -174,7 +174,7 @@ func TestInTimeRange(t *testing.T) {
 }

 func TestInTimeRangeWithLimit(t *testing.T) {
-	sb := NewTimedStore(5 * time.Second)
+	sb := NewTimedStore(5*time.Second, -1)
 	sb.Add(createTime(1), 1)
 	sb.Add(createTime(2), 2)
 	sb.Add(createTime(3), 3)

@@ -190,3 +190,32 @@ func TestInTimeRangeWithLimit(t *testing.T) {
 	expectElements(t, sb.InTimeRange(empty, empty, 1), []int{4})
 	assert.Empty(t, sb.InTimeRange(empty, empty, 0))
 }
+
+func TestLimitedSize(t *testing.T) {
+	sb := NewTimedStore(time.Hour, 5)
+
+	// Add 1.
+	sb.Add(createTime(0), 0)
+	expectSize(t, sb, 1)
+	expectAllElements(t, sb, []int{0})
+
+	// Fill the buffer.
+	for i := 1; i <= 5; i++ {
+		expectSize(t, sb, i)
+		sb.Add(createTime(i), i)
+	}
+	expectSize(t, sb, 5)
+	expectAllElements(t, sb, []int{1, 2, 3, 4, 5})
+
+	// Add more than is available in the buffer
+	sb.Add(createTime(6), 6)
+	expectSize(t, sb, 5)
+	expectAllElements(t, sb, []int{2, 3, 4, 5, 6})
+
+	// Replace all elements.
+	for i := 7; i <= 10; i++ {
+		sb.Add(createTime(i), i)
+	}
+	expectSize(t, sb, 5)
+	expectAllElements(t, sb, []int{6, 7, 8, 9, 10})
+}
@@ -15,4 +15,4 @@
 package version

 // Version of cAdvisor.
-const VERSION = "0.12.0"
+const VERSION = "0.13.0"
@@ -24,7 +24,7 @@ Example:
 	}

 */
-package inotify
+package inotify // import "golang.org/x/exp/inotify"

 import (
 	"errors"

@@ -221,7 +221,7 @@ func (e *Event) String() string {

 	m := e.Mask
 	for _, b := range eventBits {
-		if m&b.Value != 0 {
+		if m&b.Value == b.Value {
 			m &^= b.Value
 			events += "|" + b.Name
 		}
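The String change above matters for multi-bit mask entries: under the old test, a mask containing only one bit of a combined event name would still match that name. A sketch using the standard Linux inotify move bits (0x40 and 0x80):

package main

import "fmt"

const (
	movedFrom uint32 = 0x40 // IN_MOVED_FROM (Linux inotify ABI)
	movedTo   uint32 = 0x80 // IN_MOVED_TO
	move             = movedFrom | movedTo
)

func main() {
	m := movedFrom // only one of MOVE's two bits is set
	fmt.Println(m&move != 0)    // true:  the old check would also report "MOVE"
	fmt.Println(m&move == move) // false: the new check requires both bits
}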