mirror of https://github.com/k3s-io/k3s
Vet fixes, mostly pass lock by value errors.
parent 8e4ac1925b
commit 696423e044
@@ -15627,9 +15627,6 @@
 "v1.NodeStatus": {
 "id": "v1.NodeStatus",
 "description": "NodeStatus is information about the current status of a node.",
-"required": [
-"images"
-],
 "properties": {
 "capacity": {
 "type": "any",
@@ -153,7 +153,6 @@ func (g *genProtoIDL) GenerateType(c *generator.Context, t *types.Type, w io.Wri
 default:
 return b.unknown(sw)
 }
-return sw.Error()
 }

 // ProtobufFromGoNamer finds the protobuf name of a type (and its package, and
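The deleted return above is the kind of dead code go vet reports as unreachable: every branch of the switch already returns, so the trailing return sw.Error() could never run. A minimal illustrative sketch of the pattern (not code from this repository):

package main

import "fmt"

func describe(n int) string {
	switch {
	case n < 0:
		return "negative"
	case n == 0:
		return "zero"
	default:
		return "positive"
	}
	// return fmt.Sprint(n) // unreachable: every case above returns, which is what vet flags
}

func main() { fmt.Println(describe(3)) }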
@@ -424,7 +423,7 @@ func memberTypeToProtobufField(locator ProtobufLocator, field *protoField, t *ty
 field.Nullable = true
 case types.Alias:
 if err := memberTypeToProtobufField(locator, field, t.Underlying); err != nil {
-log.Printf("failed to alias: %s %s: err", t.Name, t.Underlying.Name, err)
+log.Printf("failed to alias: %s %s: err %v", t.Name, t.Underlying.Name, err)
 return err
 }
 if field.Extras == nil {
@@ -594,7 +593,7 @@ func membersToFields(locator ProtobufLocator, t *types.Type, localPackage types.
 tag := field.Tag
 if tag != -1 {
 if existing, ok := byTag[tag]; ok {
-return nil, fmt.Errorf("field %q and %q in %q both have tag %d", field.Name, existing.Name, tag)
+return nil, fmt.Errorf("field %q and %q both have tag %d", field.Name, existing.Name, tag)
 }
 byTag[tag] = field
 }
@@ -165,7 +165,7 @@ var FooAnotherVar proto.Frobber = proto.AnotherVar
 t.Errorf("Wanted, got:\n%v\n-----\n%v\n", e, a)
 }
 if p := u.Package("base/foo/proto"); !p.HasImport("base/common/proto") {
-t.Errorf("Unexpected lack of import line: %#s", p.Imports)
+t.Errorf("Unexpected lack of import line: %s", p.Imports)
 }
 }

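The three format-string fixes above are go vet printf findings: an argument with no matching verb, a verb with no matching argument, and the unsupported %#s combination. A minimal illustrative sketch (not code from this repository):

package main

import (
	"fmt"
	"log"
)

func main() {
	name, kind, err := "Foo", "alias", fmt.Errorf("boom")

	// One verb per argument; before the fix the trailing err had no verb.
	log.Printf("failed to alias: %s %s: err %v", name, kind, err)

	// Verb count now matches the arguments; the old string had an extra %q.
	fmt.Println(fmt.Errorf("field %q and %q both have tag %d", "a", "b", 3))

	// %s works for a slice of strings; %#s is not a valid flag/verb combination.
	fmt.Printf("imports: %s\n", []string{"base/common/proto"})
}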
@@ -488,7 +488,7 @@ func (k *Executor) bindAndWatchTask(driver bindings.ExecutorDriver, task *mesos.
 // within the launch timeout window we should see a pod-task update via the registry.
 // if we see a Running update then we need to generate a TASK_RUNNING status update for mesos.
 handlerFinished := false
-handler := watchHandler{
+handler := &watchHandler{
 expiration: watchExpiration{
 timeout: launchTimer.C,
 onEvent: func(taskID string) {
@@ -118,7 +118,7 @@ func NewRegistry(client *clientset.Clientset) Registry {
 return r
 }

-func (r registryImpl) watch() <-chan *PodEvent {
+func (r *registryImpl) watch() <-chan *PodEvent {
 return r.updates
 }

@@ -130,26 +130,26 @@ func taskIDFor(pod *api.Pod) (taskID string, err error) {
 return
 }

-func (r registryImpl) shutdown() {
+func (r *registryImpl) shutdown() {
 //TODO(jdef) flesh this out
 r.m.Lock()
 defer r.m.Unlock()
 r.boundTasks = map[string]*api.Pod{}
 }

-func (r registryImpl) empty() bool {
+func (r *registryImpl) empty() bool {
 r.m.RLock()
 defer r.m.RUnlock()
 return len(r.boundTasks) == 0
 }

-func (r registryImpl) pod(taskID string) *api.Pod {
+func (r *registryImpl) pod(taskID string) *api.Pod {
 r.m.RLock()
 defer r.m.RUnlock()
 return r.boundTasks[taskID]
 }

-func (r registryImpl) Remove(taskID string) error {
+func (r *registryImpl) Remove(taskID string) error {
 r.m.Lock()
 defer r.m.Unlock()
 pod, ok := r.boundTasks[taskID]
@@ -169,7 +169,7 @@ func (r registryImpl) Remove(taskID string) error {
 return nil
 }

-func (r registryImpl) Update(pod *api.Pod) (*PodEvent, error) {
+func (r *registryImpl) Update(pod *api.Pod) (*PodEvent, error) {
 // Don't do anything for pods without task anotation which means:
 // - "pre-scheduled" pods which have a NodeName set to this node without being scheduled already.
 // - static/mirror pods: they'll never have a TaskID annotation, and we don't expect them to ever change.
@@ -254,8 +254,7 @@ func copyPorts(dest, src *api.Pod) bool {
 return true
 }

-func (r registryImpl) bind(taskID string, pod *api.Pod) error {
-
+func (r *registryImpl) bind(taskID string, pod *api.Pod) error {
 // validate taskID matches that of the annotation
 annotatedTaskID, err := taskIDFor(pod)
 if err != nil {
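These receiver changes are the pass-lock-by-value fixes named in the commit message: registryImpl carries a sync.RWMutex, so a value receiver copies the mutex on every call and the methods stop sharing one lock. A minimal illustrative sketch of the diagnostic and the fix (not code from this repository):

package main

import (
	"fmt"
	"sync"
)

type registry struct {
	m     sync.RWMutex
	bound map[string]string
}

// func (r registry) empty() bool { ... }
// go vet (copylocks): empty passes lock by value, because registry contains sync.RWMutex.

func (r *registry) empty() bool { // pointer receiver: every caller locks the same mutex
	r.m.RLock()
	defer r.m.RUnlock()
	return len(r.bound) == 0
}

func main() {
	r := &registry{bound: map[string]string{}}
	fmt.Println(r.empty())
}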
@@ -55,7 +55,7 @@ type (
 watcher struct {
 updates <-chan *PodEvent
 rw sync.RWMutex
-handlers map[string]watchHandler
+handlers map[string]*watchHandler
 filters []watchFilter
 runOnce chan struct{}
 }
@@ -64,7 +64,7 @@ type (
 func newWatcher(updates <-chan *PodEvent) *watcher {
 return &watcher{
 updates: updates,
-handlers: make(map[string]watchHandler),
+handlers: make(map[string]*watchHandler),
 runOnce: make(chan struct{}),
 }
 }
@@ -86,7 +86,7 @@ updateLoop:
 }
 }
 log.V(1).Info("handling " + u.FormatShort())
-h, ok := func() (h watchHandler, ok bool) {
+h, ok := func() (h *watchHandler, ok bool) {
 pw.rw.RLock()
 defer pw.rw.RUnlock()
 h, ok = pw.handlers[u.taskID]
@@ -125,7 +125,7 @@ func (pw *watcher) addFilter(f watchFilter) {
 }

 // forTask associates a handler `h` with the given taskID.
-func (pw *watcher) forTask(taskID string, h watchHandler) {
+func (pw *watcher) forTask(taskID string, h *watchHandler) {
 pw.rw.Lock()
 pw.handlers[taskID] = h
 pw.rw.Unlock()
@@ -4492,7 +4492,7 @@ The resulting set of endpoints can be viewed as:<br>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">images</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">List of container images on this node</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_containerimage">v1.ContainerImage</a> array</p></td>
 <td class="tableblock halign-left valign-top"></td>
 </tr>
@@ -7780,7 +7780,7 @@ The resulting set of endpoints can be viewed as:<br>
 </div>
 <div id="footer">
 <div id="footer-text">
-Last updated 2016-03-21 22:46:36 UTC
+Last updated 2016-04-06 18:15:59 UTC
 </div>
 </div>
 </body>
@@ -101,7 +101,7 @@ func InterpretWatchError(err error, resource unversioned.GroupResource, name str
 switch {
 case storage.IsInvalidError(err):
 invalidError, _ := err.(storage.InvalidError)
-return errors.NewInvalid(unversioned.GroupKind{resource.Group, resource.Resource}, name, invalidError.Errs)
+return errors.NewInvalid(unversioned.GroupKind{Group: resource.Group, Kind: resource.Resource}, name, invalidError.Errs)
 default:
 return err
 }
@@ -52,7 +52,7 @@ func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Obje
 }
 // Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too.
 if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID {
-return false, false, errors.NewConflict(unversioned.GroupResource{gvk.Group, gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID))
+return false, false, errors.NewConflict(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID))
 }
 gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy)
 if !ok {
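The GroupKind and GroupResource edits above are go vet composite-literal findings: an unkeyed literal of a struct from another package breaks silently if that struct ever gains or reorders fields, so the fields are now named explicitly. A minimal illustrative sketch (local type, so vet itself would stay quiet here, but the shape of the fix is the same; not code from this repository):

package main

import "fmt"

type GroupKind struct {
	Group string
	Kind  string
}

func main() {
	// unkeyed := GroupKind{"apps", "Deployment"}          // positional: depends on field order
	keyed := GroupKind{Group: "apps", Kind: "Deployment"} // keyed: order-independent and self-documenting
	fmt.Println(keyed)
}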
@@ -33273,11 +33273,12 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 yyq2[4] = len(x.Addresses) != 0
 yyq2[5] = true
 yyq2[6] = true
+yyq2[7] = len(x.Images) != 0
 var yynn2 int
 if yyr2 || yy2arr2 {
 r.EncodeArrayStart(8)
 } else {
-yynn2 = 1
+yynn2 = 0
 for _, b := range yyq2 {
 if b {
 yynn2++
@@ -33449,28 +33450,34 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 }
 if yyr2 || yy2arr2 {
 z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-if x.Images == nil {
-r.EncodeNil()
-} else {
-yym29 := z.EncBinary()
-_ = yym29
-if false {
+if yyq2[7] {
+if x.Images == nil {
+r.EncodeNil()
 } else {
-h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+yym29 := z.EncBinary()
+_ = yym29
+if false {
+} else {
+h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+}
 }
+} else {
+r.EncodeNil()
 }
 } else {
-z.EncSendContainerState(codecSelfer_containerMapKey1234)
-r.EncodeString(codecSelferC_UTF81234, string("images"))
-z.EncSendContainerState(codecSelfer_containerMapValue1234)
-if x.Images == nil {
-r.EncodeNil()
-} else {
-yym30 := z.EncBinary()
-_ = yym30
-if false {
+if yyq2[7] {
+z.EncSendContainerState(codecSelfer_containerMapKey1234)
+r.EncodeString(codecSelferC_UTF81234, string("images"))
+z.EncSendContainerState(codecSelfer_containerMapValue1234)
+if x.Images == nil {
+r.EncodeNil()
 } else {
-h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+yym30 := z.EncBinary()
+_ = yym30
+if false {
+} else {
+h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+}
 }
 }
 }
 }
@@ -1671,8 +1671,14 @@ type NodeSpec struct {

 // DaemonEndpoint contains information about a single Daemon endpoint.
 type DaemonEndpoint struct {
+/*
+The port tag was not properly in quotes in earlier releases, so it must be
+uppercased for backwards compat (since it was falling back to var name of
+'Port').
+*/
+
 // Port number of the given endpoint.
-Port int `json:port`
+Port int `json:"Port"`
 }

 // NodeDaemonEndpoints lists ports opened by daemons running on the Node.
@@ -1718,7 +1724,7 @@ type NodeStatus struct {
 // Set of ids/uuids to uniquely identify the node.
 NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"`
 // List of container images on this node
-Images []ContainerImage `json:"images",omitempty`
+Images []ContainerImage `json:"images,omitempty"`
 }

 // Describe a container image
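Both tag edits above are go vet structtag findings: in json:port the value is not quoted, and in json:"images",omitempty the option sits outside the quoted value, so encoding/json ignores the intent in both cases. A minimal illustrative sketch with simplified field types (not code from this repository):

package main

import (
	"encoding/json"
	"fmt"
)

type DaemonEndpoint struct {
	// Port int `json:port`   // malformed: unquoted value, the tag is ignored
	Port int `json:"Port"`
}

type NodeStatus struct {
	// Images []string `json:"images",omitempty`   // malformed: ,omitempty sits outside the quotes
	Images []string `json:"images,omitempty"`
}

func main() {
	b, _ := json.Marshal(NodeStatus{})
	fmt.Println(string(b)) // {} because omitempty now actually applies
}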
@@ -33155,11 +33155,12 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 yyq2[4] = len(x.Addresses) != 0
 yyq2[5] = true
 yyq2[6] = true
+yyq2[7] = len(x.Images) != 0
 var yynn2 int
 if yyr2 || yy2arr2 {
 r.EncodeArrayStart(8)
 } else {
-yynn2 = 1
+yynn2 = 0
 for _, b := range yyq2 {
 if b {
 yynn2++
@@ -33331,28 +33332,34 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 }
 if yyr2 || yy2arr2 {
 z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-if x.Images == nil {
-r.EncodeNil()
-} else {
-yym29 := z.EncBinary()
-_ = yym29
-if false {
+if yyq2[7] {
+if x.Images == nil {
+r.EncodeNil()
 } else {
-h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+yym29 := z.EncBinary()
+_ = yym29
+if false {
+} else {
+h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+}
 }
+} else {
+r.EncodeNil()
 }
 } else {
-z.EncSendContainerState(codecSelfer_containerMapKey1234)
-r.EncodeString(codecSelferC_UTF81234, string("images"))
-z.EncSendContainerState(codecSelfer_containerMapValue1234)
-if x.Images == nil {
-r.EncodeNil()
-} else {
-yym30 := z.EncBinary()
-_ = yym30
-if false {
+if yyq2[7] {
+z.EncSendContainerState(codecSelfer_containerMapKey1234)
+r.EncodeString(codecSelferC_UTF81234, string("images"))
+z.EncSendContainerState(codecSelfer_containerMapValue1234)
+if x.Images == nil {
+r.EncodeNil()
 } else {
-h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+yym30 := z.EncBinary()
+_ = yym30
+if false {
+} else {
+h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
+}
 }
 }
 }
 }
@@ -2047,8 +2047,14 @@ type NodeSpec struct {

 // DaemonEndpoint contains information about a single Daemon endpoint.
 type DaemonEndpoint struct {
+/*
+The port tag was not properly in quotes in earlier releases, so it must be
+uppercased for backwards compat (since it was falling back to var name of
+'Port').
+*/
+
 // Port number of the given endpoint.
-Port int32 `json:port`
+Port int32 `json:"Port"`
 }

 // NodeDaemonEndpoints lists ports opened by daemons running on the Node.
@@ -2101,7 +2107,7 @@ type NodeStatus struct {
 // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info
 NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"`
 // List of container images on this node
-Images []ContainerImage `json:"images",omitempty`
+Images []ContainerImage `json:"images,omitempty"`
 }

 // Describe a container image
@@ -2048,7 +2048,7 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 }
 } else {
 z.EncSendContainerState(codecSelfer_containerMapKey1234)
-r.EncodeString(codecSelferC_UTF81234, string("VolumeStatsAggPeriod"))
+r.EncodeString(codecSelferC_UTF81234, string("volumeStatsAggPeriod"))
 z.EncSendContainerState(codecSelfer_containerMapValue1234)
 yy149 := &x.VolumeStatsAggPeriod
 yym150 := z.EncBinary()
@@ -3190,7 +3190,7 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode
 } else {
 x.LowDiskSpaceThresholdMB = int(r.DecodeInt(codecSelferBitsize1234))
 }
-case "VolumeStatsAggPeriod":
+case "volumeStatsAggPeriod":
 if r.TryDecodeAsNil() {
 x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{}
 } else {
@@ -230,7 +230,7 @@ type KubeletConfiguration struct {
 // be rejected.
 LowDiskSpaceThresholdMB int `json:"lowDiskSpaceThresholdMB"`
 // How frequently to calculate and cache volume disk usage for all pods
-VolumeStatsAggPeriod unversioned.Duration `json:volumeStatsAggPeriod`
+VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"`
 // networkPluginName is the name of the network plugin to be invoked for
 // various events in kubelet/pod lifecycle
 NetworkPluginName string `json:"networkPluginName"`
@@ -206,7 +206,7 @@ func TestFlattenSuccess(t *testing.T) {

 }

-func ExampleMinifyAndShorten() {
+func Example_minifyAndShorten() {
 certFile, _ := ioutil.TempFile("", "")
 defer os.Remove(certFile.Name())
 keyFile, _ := ioutil.TempFile("", "")
@@ -22,7 +22,7 @@ import (
 "github.com/ghodss/yaml"
 )

-func ExampleEmptyConfig() {
+func Example_emptyConfig() {
 defaultConfig := NewConfig()

 output, err := yaml.Marshal(defaultConfig)
@@ -39,7 +39,7 @@ func ExampleEmptyConfig() {
 // users: {}
 }

-func ExampleOfOptionsConfig() {
+func Example_ofOptionsConfig() {
 defaultConfig := NewConfig()
 defaultConfig.Preferences.Colors = true
 defaultConfig.Clusters["alfa"] = &Cluster{
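The renamed functions above are go vet example-name findings: in a _test.go file, ExampleFoo is expected to document an identifier named Foo, and none of these suffixes matched one. Lowercasing the suffix after an underscore turns them into free-standing package examples. A minimal illustrative sketch of the convention, assuming it sits in some package's _test.go file (the package name is a placeholder, not code from this repository):

package config_test

import "fmt"

// func ExampleEmptyConfig() { ... }
// vet complains that ExampleEmptyConfig refers to an unknown identifier EmptyConfig.

// Example_emptyConfig is a package-level example; the lowercase suffix is only a
// description, so no matching identifier is required.
func Example_emptyConfig() {
	fmt.Println("apiVersion: v1")
	// Output: apiVersion: v1
}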
@@ -376,7 +376,7 @@ func TestMigratingFileSourceMissingSkip(t *testing.T) {
 }
 }

-func ExampleNoMergingOnExplicitPaths() {
+func Example_noMergingOnExplicitPaths() {
 commandLineFile, _ := ioutil.TempFile("", "")
 defer os.Remove(commandLineFile.Name())
 envVarFile, _ := ioutil.TempFile("", "")
@@ -423,7 +423,7 @@ func ExampleNoMergingOnExplicitPaths() {
 // token: red-token
 }

-func ExampleMergingSomeWithConflict() {
+func Example_mergingSomeWithConflict() {
 commandLineFile, _ := ioutil.TempFile("", "")
 defer os.Remove(commandLineFile.Name())
 envVarFile, _ := ioutil.TempFile("", "")
@@ -476,7 +476,7 @@ func ExampleMergingSomeWithConflict() {
 // token: yellow-token
 }

-func ExampleMergingEverythingNoConflicts() {
+func Example_mergingEverythingNoConflicts() {
 commandLineFile, _ := ioutil.TempFile("", "")
 defer os.Remove(commandLineFile.Name())
 envVarFile, _ := ioutil.TempFile("", "")
@@ -98,8 +98,6 @@ func ObjectReaction(o ObjectRetriever, mapper meta.RESTMapper) ReactionFunc {
 default:
 return false, nil, fmt.Errorf("no reaction implemented for %s", action)
 }
-
-return true, nil, nil
 }
 }

@@ -104,7 +104,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (

 func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient, resyncPeriod time.Duration) *HorizontalController {
 broadcaster := record.NewBroadcaster()
-broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{evtNamespacer.Events("")})
+broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: evtNamespacer.Events("")})
 recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})

 controller := &HorizontalController{
@@ -333,7 +333,7 @@ func (tc *testCase) runTest(t *testing.T) {
 metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)

 broadcaster := record.NewBroadcasterForTests(0)
-broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{testClient.Core().Events("")})
+broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: testClient.Core().Events("")})
 recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})

 hpaController := &HorizontalController{
@@ -344,7 +344,7 @@ func stringBody(body string) io.ReadCloser {
 // }
 //}

-func ExamplePrintReplicationControllerWithNamespace() {
+func Example_printReplicationControllerWithNamespace() {
 f, tf, codec := NewAPIFactory()
 tf.Printer = kubectl.NewHumanReadablePrinter(false, true, false, false, false, false, []string{})
 tf.Client = &fake.RESTClient{
@@ -389,7 +389,7 @@ func ExamplePrintReplicationControllerWithNamespace() {
 // beep foo 1 1 10y
 }

-func ExamplePrintMultiContainersReplicationControllerWithWide() {
+func Example_printMultiContainersReplicationControllerWithWide() {
 f, tf, codec := NewAPIFactory()
 tf.Printer = kubectl.NewHumanReadablePrinter(false, false, true, false, false, false, []string{})
 tf.Client = &fake.RESTClient{
@@ -437,7 +437,7 @@ func ExamplePrintMultiContainersReplicationControllerWithWide() {
 // foo 1 1 10y foo,foo2 someimage,someimage2 foo=bar
 }

-func ExamplePrintReplicationController() {
+func Example_printReplicationController() {
 f, tf, codec := NewAPIFactory()
 tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, false, false, false, []string{})
 tf.Client = &fake.RESTClient{
@@ -485,7 +485,7 @@ func ExamplePrintReplicationController() {
 // foo 1 1 10y
 }

-func ExamplePrintPodWithWideFormat() {
+func Example_printPodWithWideFormat() {
 f, tf, codec := NewAPIFactory()
 tf.Printer = kubectl.NewHumanReadablePrinter(false, false, true, false, false, false, []string{})
 tf.Client = &fake.RESTClient{
@@ -520,7 +520,7 @@ func ExamplePrintPodWithWideFormat() {
 // test1 1/2 podPhase 6 10y kubernetes-minion-abcd
 }

-func ExamplePrintPodWithShowLabels() {
+func Example_printPodWithShowLabels() {
 f, tf, codec := NewAPIFactory()
 tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, false, true, false, []string{})
 tf.Client = &fake.RESTClient{
@@ -651,7 +651,7 @@ func newAllPhasePodList() *api.PodList {
 }
 }

-func ExamplePrintPodHideTerminated() {
+func Example_printPodHideTerminated() {
 f, tf, codec := NewAPIFactory()
 tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, false, false, false, []string{})
 tf.Client = &fake.RESTClient{
@@ -671,7 +671,7 @@ func ExamplePrintPodHideTerminated() {
 // test5 1/2 Unknown 6 10y
 }

-func ExamplePrintPodShowAll() {
+func Example_printPodShowAll() {
 f, tf, codec := NewAPIFactory()
 tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, true, false, false, []string{})
 tf.Client = &fake.RESTClient{
@@ -693,7 +693,7 @@ func ExamplePrintPodShowAll() {
 // test5 1/2 Unknown 6 10y
 }

-func ExamplePrintServiceWithNamespacesAndLabels() {
+func Example_printServiceWithNamespacesAndLabels() {
 f, tf, codec := NewAPIFactory()
 tf.Printer = kubectl.NewHumanReadablePrinter(false, true, false, false, false, false, []string{"l1"})
 tf.Client = &fake.RESTClient{
@@ -44,7 +44,7 @@ func newRedFederalCowHammerConfig() clientcmdapi.Config {
 }
 }

-func ExampleView() {
+func Example_view() {
 expectedConfig := newRedFederalCowHammerConfig()
 test := configCommandTest{
 args: []string{"view"},
@@ -327,14 +327,14 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
 // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
 // operator, DoubleEquals operator and In operator with only one element in the set.
 if len(t.Spec.Selector.MatchExpressions) > 0 {
-return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
+return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
 }
 return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
 case *extensions.ReplicaSet:
 // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
 // operator, DoubleEquals operator and In operator with only one element in the set.
 if len(t.Spec.Selector.MatchExpressions) > 0 {
-return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
+return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
 }
 return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
 default:
@@ -56,10 +56,10 @@ import (
 )

 // setUp is a convience function for setting up for (most) tests.
-func setUp(t *testing.T) (Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
+func setUp(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
 server := etcdtesting.NewEtcdTestClientServer(t)

-master := Master{
+master := &Master{
 GenericAPIServer: &genericapiserver.GenericAPIServer{},
 }
 config := Config{
@@ -42,7 +42,7 @@ type FooList struct {
 unversioned.TypeMeta `json:",inline"`
 unversioned.ListMeta `json:"metadata,omitempty" description:"standard list metadata; see http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"`

-items []Foo `json:"items"`
+Items []Foo `json:"items"`
 }

 func TestCodec(t *testing.T) {
@@ -24,9 +24,9 @@ import (
 )

 type encodable struct {
-e Encoder `json:"-"`
+E Encoder `json:"-"`
 obj Object
-versions []unversioned.GroupVersion `json:"-"`
+versions []unversioned.GroupVersion
 }

 func (e encodable) GetObjectKind() unversioned.ObjectKind { return e.obj.GetObjectKind() }
@@ -47,7 +47,7 @@ func (re encodable) UnmarshalJSON(in []byte) error {
 // Marshal may get called on pointers or values, so implement MarshalJSON on value.
 // http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
 func (re encodable) MarshalJSON() ([]byte, error) {
-return Encode(re.e, re.obj)
+return Encode(re.E, re.obj)
 }

 // NewEncodableList creates an object that will be encoded with the provided codec on demand.
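The items to Items and e to E renames above belong to the same structtag family of findings: a json tag on an unexported field is dead weight, because encoding/json can only reach exported fields. A minimal illustrative sketch (not code from this repository):

package main

import (
	"encoding/json"
	"fmt"
)

type FooList struct {
	// items []string `json:"items"`   // unexported: the tag can never take effect
	Items []string `json:"items"` // exported, so the tag is honored
}

func main() {
	b, _ := json.Marshal(FooList{Items: []string{"a"}})
	fmt.Println(string(b)) // {"items":["a"]}
}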
@@ -21,7 +21,7 @@ import (
 "os"
 )

-func ExampleTrailingNewline() {
+func Example_trailingNewline() {
 ld := NewLineDelimiter(os.Stdout, "|")
 defer ld.Flush()
 fmt.Fprint(ld, " Hello \n World \n")
@@ -30,7 +30,7 @@ func ExampleTrailingNewline() {
 // | World |
 // ||
 }
-func ExampleNoTrailingNewline() {
+func Example_noTrailingNewline() {
 ld := NewLineDelimiter(os.Stdout, "|")
 defer ld.Flush()
 fmt.Fprint(ld, " Hello \n World ")
@@ -31,8 +31,8 @@ import (

 const pluginName = "kubernetes.io/flocker"

-func newInitializedVolumePlugMgr(t *testing.T) (volume.VolumePluginMgr, string) {
-plugMgr := volume.VolumePluginMgr{}
+func newInitializedVolumePlugMgr(t *testing.T) (*volume.VolumePluginMgr, string) {
+plugMgr := &volume.VolumePluginMgr{}
 dir, err := utiltesting.MkTmpdir("flocker")
 assert.NoError(t, err)
 plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(dir, nil, nil))
@@ -18,7 +18,6 @@ package e2e

 import (
 "fmt"
-"log"
 "os"
 "os/exec"
 "path/filepath"
@@ -90,13 +89,13 @@ func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns str
 // Run the k8petstore app, and log / fail if it returns any errors.
 // This should return quickly, assuming containers are downloaded.
 if err = cmd.Start(); err != nil {
-log.Fatal(err)
+Failf("%v", err)
 }
 // Make sure there are no command errors.
 if err = cmd.Wait(); err != nil {
 if exiterr, ok := err.(*exec.ExitError); ok {
 if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
-log.Printf("Exit Status: %d", status.ExitStatus())
+Logf("Exit Status: %d", status.ExitStatus())
 }
 }
 }
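Replacing log.Fatal and log.Printf with the e2e framework's Failf and Logf keeps failures inside the test run: log.Fatal exits the whole process and skips deferred cleanup, while a framework failure helper records the message and fails only the current test. A minimal illustrative sketch of that shape using the standard testing package (the helper name is made up, not the e2e framework's API):

package petstore

import (
	"os/exec"
	"testing"
)

// failf records a formatted failure on t instead of killing the process the way
// log.Fatal would, so deferred cleanup in the test still runs.
func failf(t *testing.T, format string, args ...interface{}) {
	t.Helper()
	t.Fatalf(format, args...)
}

func TestRunCommand(t *testing.T) {
	cmd := exec.Command("true")
	if err := cmd.Start(); err != nil {
		failf(t, "%v", err) // the moral equivalent of the replaced log.Fatal(err)
	}
	if err := cmd.Wait(); err != nil {
		t.Logf("command exited: %v", err) // stands in for the replaced log.Printf
	}
}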
@@ -67,7 +67,7 @@ type LogsSizeVerifier struct {
 client *client.Client
 stopChannel chan bool
 // data stores LogSizeData groupped per IP and log_path
-data LogsSizeData
+data *LogsSizeData
 masterAddress string
 nodeAddresses []string
 wg sync.WaitGroup
@@ -117,13 +117,13 @@ type WorkItem struct {
 backoffMultiplier int
 }

-func prepareData(masterAddress string, nodeAddresses []string) LogsSizeData {
+func prepareData(masterAddress string, nodeAddresses []string) *LogsSizeData {
 data := make(LogSizeDataTimeseries)
 ips := append(nodeAddresses, masterAddress)
 for _, ip := range ips {
 data[ip] = make(map[string][]TimestampedSize)
 }
-return LogsSizeData{
+return &LogsSizeData{
 data: data,
 lock: sync.Mutex{},
 }
@@ -164,7 +164,7 @@ func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier
 for i := 0; i < workersNo; i++ {
 workers[i] = &LogSizeGatherer{
 stopChannel: stopChannel,
-data: &verifier.data,
+data: verifier.data,
 wg: &verifier.wg,
 workChannel: workChannel,
 }
@@ -307,23 +307,23 @@ func nowStamp() string {
 return time.Now().Format(time.StampMilli)
 }

-func logf(level string, format string, args ...interface{}) {
+func log(level string, format string, args ...interface{}) {
 fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
 }

 func Logf(format string, args ...interface{}) {
-logf("INFO", format, args...)
+log("INFO", format, args...)
 }

 func Failf(format string, args ...interface{}) {
 msg := fmt.Sprintf(format, args...)
-logf("FAIL", msg)
+log("INFO", msg)
 Fail(nowStamp()+": "+msg, 1)
 }

 func Skipf(format string, args ...interface{}) {
 msg := fmt.Sprintf(format, args...)
-logf("SKIP", msg)
+log("INFO", msg)
 Skip(nowStamp() + ": " + msg)
 }

@@ -47,11 +47,11 @@ type ResourceConsumerHandler struct {
 metricsLock sync.Mutex
 }

-func NewResourceConsumerHandler() ResourceConsumerHandler {
-return ResourceConsumerHandler{metrics: map[string]float64{}}
+func NewResourceConsumerHandler() *ResourceConsumerHandler {
+return &ResourceConsumerHandler{metrics: map[string]float64{}}
 }

-func (handler ResourceConsumerHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+func (handler *ResourceConsumerHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 // handle exposing metrics in Prometheus format (both GET & POST)
 if req.URL.Path == metricsAddress {
 handler.handleMetrics(w)
@@ -89,7 +89,7 @@ func (handler ResourceConsumerHandler) ServeHTTP(w http.ResponseWriter, req *htt
 http.Error(w, unknownFunction, http.StatusNotFound)
 }

-func (handler ResourceConsumerHandler) handleConsumeCPU(w http.ResponseWriter, query url.Values) {
+func (handler *ResourceConsumerHandler) handleConsumeCPU(w http.ResponseWriter, query url.Values) {
 // geting string data for consumeCPU
 durationSecString := query.Get(durationSecQuery)
 millicoresString := query.Get(millicoresQuery)
@@ -112,7 +112,7 @@ func (handler ResourceConsumerHandler) handleConsumeCPU(w http.ResponseWriter, q
 fmt.Fprintln(w, durationSec, durationSecQuery)
 }

-func (handler ResourceConsumerHandler) handleConsumeMem(w http.ResponseWriter, query url.Values) {
+func (handler *ResourceConsumerHandler) handleConsumeMem(w http.ResponseWriter, query url.Values) {
 // geting string data for consumeMem
 durationSecString := query.Get(durationSecQuery)
 megabytesString := query.Get(megabytesQuery)
@@ -135,13 +135,13 @@ func (handler ResourceConsumerHandler) handleConsumeMem(w http.ResponseWriter, q
 fmt.Fprintln(w, durationSec, durationSecQuery)
 }

-func (handler ResourceConsumerHandler) handleGetCurrentStatus(w http.ResponseWriter) {
+func (handler *ResourceConsumerHandler) handleGetCurrentStatus(w http.ResponseWriter) {
 GetCurrentStatus()
 fmt.Fprintln(w, "Warning: not implemented!")
 fmt.Fprint(w, getCurrentStatusAddress[1:])
 }

-func (handler ResourceConsumerHandler) handleMetrics(w http.ResponseWriter) {
+func (handler *ResourceConsumerHandler) handleMetrics(w http.ResponseWriter) {
 handler.metricsLock.Lock()
 defer handler.metricsLock.Unlock()
 for k, v := range handler.metrics {
@@ -151,7 +151,7 @@ func (handler ResourceConsumerHandler) handleMetrics(w http.ResponseWriter) {
 }
 }

-func (handler ResourceConsumerHandler) bumpMetric(metric string, delta float64, duration time.Duration) {
+func (handler *ResourceConsumerHandler) bumpMetric(metric string, delta float64, duration time.Duration) {
 handler.metricsLock.Lock()
 if _, ok := handler.metrics[metric]; ok {
 handler.metrics[metric] += delta
@@ -167,7 +167,7 @@ func (handler ResourceConsumerHandler) bumpMetric(metric string, delta float64,
 handler.metricsLock.Unlock()
 }

-func (handler ResourceConsumerHandler) handleBumpMetric(w http.ResponseWriter, query url.Values) {
+func (handler *ResourceConsumerHandler) handleBumpMetric(w http.ResponseWriter, query url.Values) {
 // geting string data for handleBumpMetric
 metric := query.Get(metricNameQuery)
 deltaString := query.Get(deltaQuery)