Vet fixes, mostly for pass-lock-by-value errors.

pull/6/head
goltermann 2016-03-23 16:45:24 -07:00
parent 8e4ac1925b
commit 696423e044
33 changed files with 136 additions and 118 deletions

View File

@ -15627,9 +15627,6 @@
"v1.NodeStatus": {
"id": "v1.NodeStatus",
"description": "NodeStatus is information about the current status of a node.",
"required": [
"images"
],
"properties": {
"capacity": {
"type": "any",

View File

@ -153,7 +153,6 @@ func (g *genProtoIDL) GenerateType(c *generator.Context, t *types.Type, w io.Wri
default:
return b.unknown(sw)
}
return sw.Error()
}
// ProtobufFromGoNamer finds the protobuf name of a type (and its package, and
@ -424,7 +423,7 @@ func memberTypeToProtobufField(locator ProtobufLocator, field *protoField, t *ty
field.Nullable = true
case types.Alias:
if err := memberTypeToProtobufField(locator, field, t.Underlying); err != nil {
log.Printf("failed to alias: %s %s: err", t.Name, t.Underlying.Name, err)
log.Printf("failed to alias: %s %s: err %v", t.Name, t.Underlying.Name, err)
return err
}
if field.Extras == nil {
@ -594,7 +593,7 @@ func membersToFields(locator ProtobufLocator, t *types.Type, localPackage types.
tag := field.Tag
if tag != -1 {
if existing, ok := byTag[tag]; ok {
return nil, fmt.Errorf("field %q and %q in %q both have tag %d", field.Name, existing.Name, tag)
return nil, fmt.Errorf("field %q and %q both have tag %d", field.Name, existing.Name, tag)
}
byTag[tag] = field
}

View File

@ -165,7 +165,7 @@ var FooAnotherVar proto.Frobber = proto.AnotherVar
t.Errorf("Wanted, got:\n%v\n-----\n%v\n", e, a)
}
if p := u.Package("base/foo/proto"); !p.HasImport("base/common/proto") {
t.Errorf("Unexpected lack of import line: %#s", p.Imports)
t.Errorf("Unexpected lack of import line: %s", p.Imports)
}
}

View File

@ -488,7 +488,7 @@ func (k *Executor) bindAndWatchTask(driver bindings.ExecutorDriver, task *mesos.
// within the launch timeout window we should see a pod-task update via the registry.
// if we see a Running update then we need to generate a TASK_RUNNING status update for mesos.
handlerFinished := false
handler := watchHandler{
handler := &watchHandler{
expiration: watchExpiration{
timeout: launchTimer.C,
onEvent: func(taskID string) {

View File

@ -118,7 +118,7 @@ func NewRegistry(client *clientset.Clientset) Registry {
return r
}
func (r registryImpl) watch() <-chan *PodEvent {
func (r *registryImpl) watch() <-chan *PodEvent {
return r.updates
}
@ -130,26 +130,26 @@ func taskIDFor(pod *api.Pod) (taskID string, err error) {
return
}
func (r registryImpl) shutdown() {
func (r *registryImpl) shutdown() {
//TODO(jdef) flesh this out
r.m.Lock()
defer r.m.Unlock()
r.boundTasks = map[string]*api.Pod{}
}
func (r registryImpl) empty() bool {
func (r *registryImpl) empty() bool {
r.m.RLock()
defer r.m.RUnlock()
return len(r.boundTasks) == 0
}
func (r registryImpl) pod(taskID string) *api.Pod {
func (r *registryImpl) pod(taskID string) *api.Pod {
r.m.RLock()
defer r.m.RUnlock()
return r.boundTasks[taskID]
}
func (r registryImpl) Remove(taskID string) error {
func (r *registryImpl) Remove(taskID string) error {
r.m.Lock()
defer r.m.Unlock()
pod, ok := r.boundTasks[taskID]
@ -169,7 +169,7 @@ func (r registryImpl) Remove(taskID string) error {
return nil
}
func (r registryImpl) Update(pod *api.Pod) (*PodEvent, error) {
func (r *registryImpl) Update(pod *api.Pod) (*PodEvent, error) {
	// Don't do anything for pods without a task annotation, which means:
// - "pre-scheduled" pods which have a NodeName set to this node without being scheduled already.
// - static/mirror pods: they'll never have a TaskID annotation, and we don't expect them to ever change.
@ -254,8 +254,7 @@ func copyPorts(dest, src *api.Pod) bool {
return true
}
func (r registryImpl) bind(taskID string, pod *api.Pod) error {
func (r *registryImpl) bind(taskID string, pod *api.Pod) error {
// validate taskID matches that of the annotation
annotatedTaskID, err := taskIDFor(pod)
if err != nil {

View File

@ -55,7 +55,7 @@ type (
watcher struct {
updates <-chan *PodEvent
rw sync.RWMutex
handlers map[string]watchHandler
handlers map[string]*watchHandler
filters []watchFilter
runOnce chan struct{}
}
@ -64,7 +64,7 @@ type (
func newWatcher(updates <-chan *PodEvent) *watcher {
return &watcher{
updates: updates,
handlers: make(map[string]watchHandler),
handlers: make(map[string]*watchHandler),
runOnce: make(chan struct{}),
}
}
@ -86,7 +86,7 @@ updateLoop:
}
}
log.V(1).Info("handling " + u.FormatShort())
h, ok := func() (h watchHandler, ok bool) {
h, ok := func() (h *watchHandler, ok bool) {
pw.rw.RLock()
defer pw.rw.RUnlock()
h, ok = pw.handlers[u.taskID]
@ -125,7 +125,7 @@ func (pw *watcher) addFilter(f watchFilter) {
}
// forTask associates a handler `h` with the given taskID.
func (pw *watcher) forTask(taskID string, h watchHandler) {
func (pw *watcher) forTask(taskID string, h *watchHandler) {
pw.rw.Lock()
pw.handlers[taskID] = h
pw.rw.Unlock()

View File

@ -4492,7 +4492,7 @@ The resulting set of endpoints can be viewed as:<br>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">images</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">List of container images on this node</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_containerimage">v1.ContainerImage</a> array</p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
@ -7780,7 +7780,7 @@ The resulting set of endpoints can be viewed as:<br>
</div>
<div id="footer">
<div id="footer-text">
Last updated 2016-03-21 22:46:36 UTC
Last updated 2016-04-06 18:15:59 UTC
</div>
</div>
</body>

View File

@ -101,7 +101,7 @@ func InterpretWatchError(err error, resource unversioned.GroupResource, name str
switch {
case storage.IsInvalidError(err):
invalidError, _ := err.(storage.InvalidError)
return errors.NewInvalid(unversioned.GroupKind{resource.Group, resource.Resource}, name, invalidError.Errs)
return errors.NewInvalid(unversioned.GroupKind{Group: resource.Group, Kind: resource.Resource}, name, invalidError.Errs)
default:
return err
}

View File

@ -52,7 +52,7 @@ func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Obje
}
// Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too.
if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID {
return false, false, errors.NewConflict(unversioned.GroupResource{gvk.Group, gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID))
return false, false, errors.NewConflict(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID))
}
gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy)
if !ok {

View File

@ -33273,11 +33273,12 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
yyq2[4] = len(x.Addresses) != 0
yyq2[5] = true
yyq2[6] = true
yyq2[7] = len(x.Images) != 0
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(8)
} else {
yynn2 = 1
yynn2 = 0
for _, b := range yyq2 {
if b {
yynn2++
@ -33449,28 +33450,34 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if x.Images == nil {
r.EncodeNil()
} else {
yym29 := z.EncBinary()
_ = yym29
if false {
if yyq2[7] {
if x.Images == nil {
r.EncodeNil()
} else {
h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
yym29 := z.EncBinary()
_ = yym29
if false {
} else {
h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
}
}
} else {
r.EncodeNil()
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("images"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Images == nil {
r.EncodeNil()
} else {
yym30 := z.EncBinary()
_ = yym30
if false {
if yyq2[7] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("images"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Images == nil {
r.EncodeNil()
} else {
h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
yym30 := z.EncBinary()
_ = yym30
if false {
} else {
h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
}
}
}
}

View File

@ -1671,8 +1671,14 @@ type NodeSpec struct {
// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpoint struct {
/*
The port tag was not properly in quotes in earlier releases, so it must be
uppercased for backwards compat (since it was falling back to var name of
'Port').
*/
// Port number of the given endpoint.
Port int `json:port`
Port int `json:"Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
@ -1718,7 +1724,7 @@ type NodeStatus struct {
// Set of ids/uuids to uniquely identify the node.
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"`
// List of container images on this node
Images []ContainerImage `json:"images",omitempty`
Images []ContainerImage `json:"images,omitempty"`
}
// Describe a container image

View File

@ -33155,11 +33155,12 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
yyq2[4] = len(x.Addresses) != 0
yyq2[5] = true
yyq2[6] = true
yyq2[7] = len(x.Images) != 0
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(8)
} else {
yynn2 = 1
yynn2 = 0
for _, b := range yyq2 {
if b {
yynn2++
@ -33331,28 +33332,34 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if x.Images == nil {
r.EncodeNil()
} else {
yym29 := z.EncBinary()
_ = yym29
if false {
if yyq2[7] {
if x.Images == nil {
r.EncodeNil()
} else {
h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
yym29 := z.EncBinary()
_ = yym29
if false {
} else {
h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
}
}
} else {
r.EncodeNil()
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("images"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Images == nil {
r.EncodeNil()
} else {
yym30 := z.EncBinary()
_ = yym30
if false {
if yyq2[7] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("images"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Images == nil {
r.EncodeNil()
} else {
h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
yym30 := z.EncBinary()
_ = yym30
if false {
} else {
h.encSliceContainerImage(([]ContainerImage)(x.Images), e)
}
}
}
}

View File

@ -2047,8 +2047,14 @@ type NodeSpec struct {
// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpoint struct {
/*
The port tag was not properly in quotes in earlier releases, so it must be
uppercased for backwards compat (since it was falling back to var name of
'Port').
*/
// Port number of the given endpoint.
Port int32 `json:port`
Port int32 `json:"Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
@ -2101,7 +2107,7 @@ type NodeStatus struct {
// More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"`
// List of container images on this node
Images []ContainerImage `json:"images",omitempty`
Images []ContainerImage `json:"images,omitempty"`
}
// Describe a container image

View File

@ -2048,7 +2048,7 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("VolumeStatsAggPeriod"))
r.EncodeString(codecSelferC_UTF81234, string("volumeStatsAggPeriod"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy149 := &x.VolumeStatsAggPeriod
yym150 := z.EncBinary()
@ -3190,7 +3190,7 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode
} else {
x.LowDiskSpaceThresholdMB = int(r.DecodeInt(codecSelferBitsize1234))
}
case "VolumeStatsAggPeriod":
case "volumeStatsAggPeriod":
if r.TryDecodeAsNil() {
x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{}
} else {

View File

@ -230,7 +230,7 @@ type KubeletConfiguration struct {
// be rejected.
LowDiskSpaceThresholdMB int `json:"lowDiskSpaceThresholdMB"`
// How frequently to calculate and cache volume disk usage for all pods
VolumeStatsAggPeriod unversioned.Duration `json:volumeStatsAggPeriod`
VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"`
// networkPluginName is the name of the network plugin to be invoked for
// various events in kubelet/pod lifecycle
NetworkPluginName string `json:"networkPluginName"`

View File

@ -206,7 +206,7 @@ func TestFlattenSuccess(t *testing.T) {
}
func ExampleMinifyAndShorten() {
func Example_minifyAndShorten() {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")

View File

@ -22,7 +22,7 @@ import (
"github.com/ghodss/yaml"
)
func ExampleEmptyConfig() {
func Example_emptyConfig() {
defaultConfig := NewConfig()
output, err := yaml.Marshal(defaultConfig)
@ -39,7 +39,7 @@ func ExampleEmptyConfig() {
// users: {}
}
func ExampleOfOptionsConfig() {
func Example_ofOptionsConfig() {
defaultConfig := NewConfig()
defaultConfig.Preferences.Colors = true
defaultConfig.Clusters["alfa"] = &Cluster{

View File

@ -376,7 +376,7 @@ func TestMigratingFileSourceMissingSkip(t *testing.T) {
}
}
func ExampleNoMergingOnExplicitPaths() {
func Example_noMergingOnExplicitPaths() {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
@ -423,7 +423,7 @@ func ExampleNoMergingOnExplicitPaths() {
// token: red-token
}
func ExampleMergingSomeWithConflict() {
func Example_mergingSomeWithConflict() {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
@ -476,7 +476,7 @@ func ExampleMergingSomeWithConflict() {
// token: yellow-token
}
func ExampleMergingEverythingNoConflicts() {
func Example_mergingEverythingNoConflicts() {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")

View File

@ -98,8 +98,6 @@ func ObjectReaction(o ObjectRetriever, mapper meta.RESTMapper) ReactionFunc {
default:
return false, nil, fmt.Errorf("no reaction implemented for %s", action)
}
return true, nil, nil
}
}

View File

@ -104,7 +104,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient, resyncPeriod time.Duration) *HorizontalController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{evtNamespacer.Events("")})
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: evtNamespacer.Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})
controller := &HorizontalController{

View File

@ -333,7 +333,7 @@ func (tc *testCase) runTest(t *testing.T) {
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
broadcaster := record.NewBroadcasterForTests(0)
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{testClient.Core().Events("")})
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: testClient.Core().Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})
hpaController := &HorizontalController{

View File

@ -344,7 +344,7 @@ func stringBody(body string) io.ReadCloser {
// }
//}
func ExamplePrintReplicationControllerWithNamespace() {
func Example_printReplicationControllerWithNamespace() {
f, tf, codec := NewAPIFactory()
tf.Printer = kubectl.NewHumanReadablePrinter(false, true, false, false, false, false, []string{})
tf.Client = &fake.RESTClient{
@ -389,7 +389,7 @@ func ExamplePrintReplicationControllerWithNamespace() {
// beep foo 1 1 10y
}
func ExamplePrintMultiContainersReplicationControllerWithWide() {
func Example_printMultiContainersReplicationControllerWithWide() {
f, tf, codec := NewAPIFactory()
tf.Printer = kubectl.NewHumanReadablePrinter(false, false, true, false, false, false, []string{})
tf.Client = &fake.RESTClient{
@ -437,7 +437,7 @@ func ExamplePrintMultiContainersReplicationControllerWithWide() {
// foo 1 1 10y foo,foo2 someimage,someimage2 foo=bar
}
func ExamplePrintReplicationController() {
func Example_printReplicationController() {
f, tf, codec := NewAPIFactory()
tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, false, false, false, []string{})
tf.Client = &fake.RESTClient{
@ -485,7 +485,7 @@ func ExamplePrintReplicationController() {
// foo 1 1 10y
}
func ExamplePrintPodWithWideFormat() {
func Example_printPodWithWideFormat() {
f, tf, codec := NewAPIFactory()
tf.Printer = kubectl.NewHumanReadablePrinter(false, false, true, false, false, false, []string{})
tf.Client = &fake.RESTClient{
@ -520,7 +520,7 @@ func ExamplePrintPodWithWideFormat() {
// test1 1/2 podPhase 6 10y kubernetes-minion-abcd
}
func ExamplePrintPodWithShowLabels() {
func Example_printPodWithShowLabels() {
f, tf, codec := NewAPIFactory()
tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, false, true, false, []string{})
tf.Client = &fake.RESTClient{
@ -651,7 +651,7 @@ func newAllPhasePodList() *api.PodList {
}
}
func ExamplePrintPodHideTerminated() {
func Example_printPodHideTerminated() {
f, tf, codec := NewAPIFactory()
tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, false, false, false, []string{})
tf.Client = &fake.RESTClient{
@ -671,7 +671,7 @@ func ExamplePrintPodHideTerminated() {
// test5 1/2 Unknown 6 10y
}
func ExamplePrintPodShowAll() {
func Example_printPodShowAll() {
f, tf, codec := NewAPIFactory()
tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, true, false, false, []string{})
tf.Client = &fake.RESTClient{
@ -693,7 +693,7 @@ func ExamplePrintPodShowAll() {
// test5 1/2 Unknown 6 10y
}
func ExamplePrintServiceWithNamespacesAndLabels() {
func Example_printServiceWithNamespacesAndLabels() {
f, tf, codec := NewAPIFactory()
tf.Printer = kubectl.NewHumanReadablePrinter(false, true, false, false, false, false, []string{"l1"})
tf.Client = &fake.RESTClient{

View File

@ -44,7 +44,7 @@ func newRedFederalCowHammerConfig() clientcmdapi.Config {
}
}
func ExampleView() {
func Example_view() {
expectedConfig := newRedFederalCowHammerConfig()
test := configCommandTest{
args: []string{"view"},

View File

@ -327,14 +327,14 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
// operator, DoubleEquals operator and In operator with only one element in the set.
if len(t.Spec.Selector.MatchExpressions) > 0 {
return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
}
return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
case *extensions.ReplicaSet:
// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
// operator, DoubleEquals operator and In operator with only one element in the set.
if len(t.Spec.Selector.MatchExpressions) > 0 {
return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
}
return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
default:

View File

@ -56,10 +56,10 @@ import (
)
// setUp is a convenience function for setting up for (most) tests.
func setUp(t *testing.T) (Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
func setUp(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
server := etcdtesting.NewEtcdTestClientServer(t)
master := Master{
master := &Master{
GenericAPIServer: &genericapiserver.GenericAPIServer{},
}
config := Config{

View File

@ -42,7 +42,7 @@ type FooList struct {
unversioned.TypeMeta `json:",inline"`
unversioned.ListMeta `json:"metadata,omitempty" description:"standard list metadata; see http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"`
items []Foo `json:"items"`
Items []Foo `json:"items"`
}
func TestCodec(t *testing.T) {

View File

@ -24,9 +24,9 @@ import (
)
type encodable struct {
e Encoder `json:"-"`
E Encoder `json:"-"`
obj Object
versions []unversioned.GroupVersion `json:"-"`
versions []unversioned.GroupVersion
}
func (e encodable) GetObjectKind() unversioned.ObjectKind { return e.obj.GetObjectKind() }
@ -47,7 +47,7 @@ func (re encodable) UnmarshalJSON(in []byte) error {
// Marshal may get called on pointers or values, so implement MarshalJSON on value.
// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
func (re encodable) MarshalJSON() ([]byte, error) {
return Encode(re.e, re.obj)
return Encode(re.E, re.obj)
}
// NewEncodableList creates an object that will be encoded with the provided codec on demand.

View File

@ -21,7 +21,7 @@ import (
"os"
)
func ExampleTrailingNewline() {
func Example_trailingNewline() {
ld := NewLineDelimiter(os.Stdout, "|")
defer ld.Flush()
fmt.Fprint(ld, " Hello \n World \n")
@ -30,7 +30,7 @@ func ExampleTrailingNewline() {
// | World |
// ||
}
func ExampleNoTrailingNewline() {
func Example_noTrailingNewline() {
ld := NewLineDelimiter(os.Stdout, "|")
defer ld.Flush()
fmt.Fprint(ld, " Hello \n World ")

View File

@ -31,8 +31,8 @@ import (
const pluginName = "kubernetes.io/flocker"
func newInitializedVolumePlugMgr(t *testing.T) (volume.VolumePluginMgr, string) {
plugMgr := volume.VolumePluginMgr{}
func newInitializedVolumePlugMgr(t *testing.T) (*volume.VolumePluginMgr, string) {
plugMgr := &volume.VolumePluginMgr{}
dir, err := utiltesting.MkTmpdir("flocker")
assert.NoError(t, err)
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(dir, nil, nil))

View File

@ -18,7 +18,6 @@ package e2e
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
@ -90,13 +89,13 @@ func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns str
// Run the k8petstore app, and log / fail if it returns any errors.
// This should return quickly, assuming containers are downloaded.
if err = cmd.Start(); err != nil {
log.Fatal(err)
Failf("%v", err)
}
// Make sure there are no command errors.
if err = cmd.Wait(); err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
log.Printf("Exit Status: %d", status.ExitStatus())
Logf("Exit Status: %d", status.ExitStatus())
}
}
}

View File

@ -67,7 +67,7 @@ type LogsSizeVerifier struct {
client *client.Client
stopChannel chan bool
	// data stores LogSizeData grouped per IP and log_path
data LogsSizeData
data *LogsSizeData
masterAddress string
nodeAddresses []string
wg sync.WaitGroup
@ -117,13 +117,13 @@ type WorkItem struct {
backoffMultiplier int
}
func prepareData(masterAddress string, nodeAddresses []string) LogsSizeData {
func prepareData(masterAddress string, nodeAddresses []string) *LogsSizeData {
data := make(LogSizeDataTimeseries)
ips := append(nodeAddresses, masterAddress)
for _, ip := range ips {
data[ip] = make(map[string][]TimestampedSize)
}
return LogsSizeData{
return &LogsSizeData{
data: data,
lock: sync.Mutex{},
}
@ -164,7 +164,7 @@ func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier
for i := 0; i < workersNo; i++ {
workers[i] = &LogSizeGatherer{
stopChannel: stopChannel,
data: &verifier.data,
data: verifier.data,
wg: &verifier.wg,
workChannel: workChannel,
}

View File

@ -307,23 +307,23 @@ func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
func logf(level string, format string, args ...interface{}) {
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
func Logf(format string, args ...interface{}) {
logf("INFO", format, args...)
log("INFO", format, args...)
}
func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
logf("FAIL", msg)
log("INFO", msg)
Fail(nowStamp()+": "+msg, 1)
}
func Skipf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
logf("SKIP", msg)
log("INFO", msg)
Skip(nowStamp() + ": " + msg)
}

View File

@ -47,11 +47,11 @@ type ResourceConsumerHandler struct {
metricsLock sync.Mutex
}
func NewResourceConsumerHandler() ResourceConsumerHandler {
return ResourceConsumerHandler{metrics: map[string]float64{}}
func NewResourceConsumerHandler() *ResourceConsumerHandler {
return &ResourceConsumerHandler{metrics: map[string]float64{}}
}
func (handler ResourceConsumerHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
func (handler *ResourceConsumerHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// handle exposing metrics in Prometheus format (both GET & POST)
if req.URL.Path == metricsAddress {
handler.handleMetrics(w)
@ -89,7 +89,7 @@ func (handler ResourceConsumerHandler) ServeHTTP(w http.ResponseWriter, req *htt
http.Error(w, unknownFunction, http.StatusNotFound)
}
func (handler ResourceConsumerHandler) handleConsumeCPU(w http.ResponseWriter, query url.Values) {
func (handler *ResourceConsumerHandler) handleConsumeCPU(w http.ResponseWriter, query url.Values) {
	// getting string data for consumeCPU
durationSecString := query.Get(durationSecQuery)
millicoresString := query.Get(millicoresQuery)
@ -112,7 +112,7 @@ func (handler ResourceConsumerHandler) handleConsumeCPU(w http.ResponseWriter, q
fmt.Fprintln(w, durationSec, durationSecQuery)
}
func (handler ResourceConsumerHandler) handleConsumeMem(w http.ResponseWriter, query url.Values) {
func (handler *ResourceConsumerHandler) handleConsumeMem(w http.ResponseWriter, query url.Values) {
	// getting string data for consumeMem
durationSecString := query.Get(durationSecQuery)
megabytesString := query.Get(megabytesQuery)
@ -135,13 +135,13 @@ func (handler ResourceConsumerHandler) handleConsumeMem(w http.ResponseWriter, q
fmt.Fprintln(w, durationSec, durationSecQuery)
}
func (handler ResourceConsumerHandler) handleGetCurrentStatus(w http.ResponseWriter) {
func (handler *ResourceConsumerHandler) handleGetCurrentStatus(w http.ResponseWriter) {
GetCurrentStatus()
fmt.Fprintln(w, "Warning: not implemented!")
fmt.Fprint(w, getCurrentStatusAddress[1:])
}
func (handler ResourceConsumerHandler) handleMetrics(w http.ResponseWriter) {
func (handler *ResourceConsumerHandler) handleMetrics(w http.ResponseWriter) {
handler.metricsLock.Lock()
defer handler.metricsLock.Unlock()
for k, v := range handler.metrics {
@ -151,7 +151,7 @@ func (handler ResourceConsumerHandler) handleMetrics(w http.ResponseWriter) {
}
}
func (handler ResourceConsumerHandler) bumpMetric(metric string, delta float64, duration time.Duration) {
func (handler *ResourceConsumerHandler) bumpMetric(metric string, delta float64, duration time.Duration) {
handler.metricsLock.Lock()
if _, ok := handler.metrics[metric]; ok {
handler.metrics[metric] += delta
@ -167,7 +167,7 @@ func (handler ResourceConsumerHandler) bumpMetric(metric string, delta float64,
handler.metricsLock.Unlock()
}
func (handler ResourceConsumerHandler) handleBumpMetric(w http.ResponseWriter, query url.Values) {
func (handler *ResourceConsumerHandler) handleBumpMetric(w http.ResponseWriter, query url.Values) {
	// getting string data for handleBumpMetric
metric := query.Get(metricNameQuery)
deltaString := query.Get(deltaQuery)