Enable vet

pull/6/head
goltermann 2016-04-15 12:18:27 -07:00
parent f3f6ffaa28
commit 3fa6c6f6d9
16 changed files with 33 additions and 35 deletions

View File

@@ -128,7 +128,7 @@ clean:
 # make vet
 # make vet WHAT=pkg/kubelet
 vet:
-	hack/vet-go.sh $(WHAT) $(TESTS)
+	hack/verify-govet.sh $(WHAT) $(TESTS)
 .PHONY: vet
 # Build a release

View File

@@ -84,7 +84,7 @@ func NewCMServer() *CMServer {
 			KubeAPIQPS:     20.0,
 			KubeAPIBurst:   30,
 			LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
-			ControllerStartInterval: unversioned.Duration{0 * time.Second},
+			ControllerStartInterval: unversioned.Duration{Duration: 0 * time.Second},
 		},
 	}
 	return &s
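
Note: the pattern above recurs throughout this commit. go vet's composites check flags unkeyed struct literals of types imported from another package, because they silently break or change meaning when the upstream type gains or reorders fields. A minimal sketch, using a hypothetical local type that mirrors the shape of unversioned.Duration:

```go
package main

import (
	"fmt"
	"time"
)

// Duration is a hypothetical stand-in for unversioned.Duration:
// a struct wrapping a single time.Duration field.
type Duration struct {
	Duration time.Duration
}

func main() {
	// Unkeyed: compiles today, but breaks if the upstream type adds or
	// reorders fields; vet's composites check flags this for imported types.
	d1 := Duration{0 * time.Second}
	// Keyed: robust to field reordering; the form this commit converts to.
	d2 := Duration{Duration: 0 * time.Second}
	fmt.Println(d1, d2)
}
```

The same keyed-literal fix drives the FakeDiscovery, ContainerID, ServicePortName, DeploymentConfig, ReplicaSetConfig, and PodLatencyData changes below.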

View File

@@ -136,7 +136,7 @@ type Clientset struct {
 }
 func (c *Clientset) Discovery() discovery.DiscoveryInterface {
-	return &fakediscovery.FakeDiscovery{&c.Fake}
+	return &fakediscovery.FakeDiscovery{Fake: &c.Fake}
 }
 `
@@ -147,6 +147,6 @@ var _ clientset.Interface = &Clientset{}
 var clientsetInterfaceImplTemplate = `
 // $.Group$ retrieves the $.Group$Client
 func (c *Clientset) $.Group$() $.PackageName$.$.Group$Interface {
-	return &fake$.PackageName$.Fake$.Group${&c.Fake}
+	return &fake$.PackageName$.Fake$.Group${Fake: &c.Fake}
 }
 `

View File

@@ -54,12 +54,12 @@ type Clientset struct {
 }
 func (c *Clientset) Discovery() discovery.DiscoveryInterface {
-	return &fakediscovery.FakeDiscovery{&c.Fake}
+	return &fakediscovery.FakeDiscovery{Fake: &c.Fake}
 }
 var _ clientset.Interface = &Clientset{}
 // Testgroup retrieves the TestgroupClient
 func (c *Clientset) Testgroup() unversionedtestgroup.TestgroupInterface {
-	return &fakeunversionedtestgroup.FakeTestgroup{&c.Fake}
+	return &fakeunversionedtestgroup.FakeTestgroup{Fake: &c.Fake}
 }

View File

@@ -72,6 +72,4 @@ for p in $pids; do
   wait $p || let "failedfiles+=1"
 done
-# hardcode a healthy exit until all vet errors can be fixed
-#exit $failedfiles
-exit 0
+exit $failedfiles

View File

@@ -56,17 +56,17 @@ type Clientset struct {
 }
 func (c *Clientset) Discovery() discovery.DiscoveryInterface {
-	return &fakediscovery.FakeDiscovery{&c.Fake}
+	return &fakediscovery.FakeDiscovery{Fake: &c.Fake}
 }
 var _ clientset.Interface = &Clientset{}
 // Core retrieves the CoreClient
 func (c *Clientset) Core() unversionedcore.CoreInterface {
-	return &fakeunversionedcore.FakeCore{&c.Fake}
+	return &fakeunversionedcore.FakeCore{Fake: &c.Fake}
 }
 // Extensions retrieves the ExtensionsClient
 func (c *Clientset) Extensions() unversionedextensions.ExtensionsInterface {
-	return &fakeunversionedextensions.FakeExtensions{&c.Fake}
+	return &fakeunversionedextensions.FakeExtensions{Fake: &c.Fake}
 }

View File

@@ -56,17 +56,17 @@ type Clientset struct {
 }
 func (c *Clientset) Discovery() discovery.DiscoveryInterface {
-	return &fakediscovery.FakeDiscovery{&c.Fake}
+	return &fakediscovery.FakeDiscovery{Fake: &c.Fake}
 }
 var _ clientset.Interface = &Clientset{}
 // Core retrieves the CoreClient
 func (c *Clientset) Core() v1core.CoreInterface {
-	return &fakev1core.FakeCore{&c.Fake}
+	return &fakev1core.FakeCore{Fake: &c.Fake}
 }
 // Extensions retrieves the ExtensionsClient
 func (c *Clientset) Extensions() v1beta1extensions.ExtensionsInterface {
-	return &fakev1beta1extensions.FakeExtensions{&c.Fake}
+	return &fakev1beta1extensions.FakeExtensions{Fake: &c.Fake}
 }

View File

@@ -39,7 +39,7 @@ func WaitForVolumeStatus(t *testing.T, os *OpenStack, volumeName string, status
 		time.Sleep(1 * time.Second)
 		if timeout >= 0 && time.Now().Second()-start >= timeout {
-			t.Logf("Volume (%s) status did not change to %s after %s seconds\n",
+			t.Logf("Volume (%s) status did not change to %s after %v seconds\n",
 				volumeName,
 				status,
 				timeout)
@@ -51,7 +51,7 @@ func WaitForVolumeStatus(t *testing.T, os *OpenStack, volumeName string, status
 		t.Fatalf("Cannot get existing Cinder volume (%s): %v", volumeName, err)
 	}
 	if getVol.Status == status {
-		t.Logf("Volume (%s) status changed to %s after %s seconds\n",
+		t.Logf("Volume (%s) status changed to %s after %v seconds\n",
			volumeName,
			status,
			timeout)
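
Note: these two hunks target vet's printf check rather than the composites check: timeout here is an int, and %s is the wrong verb for it. A standalone sketch (not the test file itself):

```go
package main

import "fmt"

func main() {
	timeout := 30 // seconds, an int, as in WaitForVolumeStatus
	// Before: %s with an int argument; vet reports a verb/type mismatch,
	// and the output reads "%!s(int=30)".
	fmt.Printf("status did not change after %s seconds\n", timeout)
	// After: %v (or %d) formats the int correctly.
	fmt.Printf("status did not change after %v seconds\n", timeout)
}
```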

View File

@@ -179,7 +179,7 @@ func TestCNIPlugin(t *testing.T) {
 		t.Fatalf("Failed to select the desired plugin: %v", err)
 	}
-	err = plug.SetUpPod("podNamespace", "podName", kubecontainer.ContainerID{"docker", "test_infra_container"})
+	err = plug.SetUpPod("podNamespace", "podName", kubecontainer.ContainerID{Type: "docker", ID: "test_infra_container"})
 	if err != nil {
 		t.Errorf("Expected nil: %v", err)
 	}
@@ -194,7 +194,7 @@ func TestCNIPlugin(t *testing.T) {
 	if string(output) != expectedOutput {
 		t.Errorf("Mismatch in expected output for setup hook. Expected '%s', got '%s'", expectedOutput, string(output))
 	}
-	err = plug.TearDownPod("podNamespace", "podName", kubecontainer.ContainerID{"docker", "test_infra_container"})
+	err = plug.TearDownPod("podNamespace", "podName", kubecontainer.ContainerID{Type: "docker", ID: "test_infra_container"})
 	if err != nil {
 		t.Errorf("Expected nil: %v", err)
 	}

View File

@@ -225,7 +225,7 @@ func TestPluginSetupHook(t *testing.T) {
 	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, nettest.NewFakeHost(nil))
-	err = plug.SetUpPod("podNamespace", "podName", kubecontainer.ContainerID{"docker", "dockerid2345"})
+	err = plug.SetUpPod("podNamespace", "podName", kubecontainer.ContainerID{Type: "docker", ID: "dockerid2345"})
 	if err != nil {
 		t.Errorf("Expected nil: %v", err)
 	}
@@ -253,7 +253,7 @@ func TestPluginTearDownHook(t *testing.T) {
 	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, nettest.NewFakeHost(nil))
-	err = plug.TearDownPod("podNamespace", "podName", kubecontainer.ContainerID{"docker", "dockerid2345"})
+	err = plug.TearDownPod("podNamespace", "podName", kubecontainer.ContainerID{Type: "docker", ID: "dockerid2345"})
 	if err != nil {
 		t.Errorf("Expected nil")
 	}
@@ -281,7 +281,7 @@ func TestPluginStatusHook(t *testing.T) {
 	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, nettest.NewFakeHost(nil))
-	ip, err := plug.Status("namespace", "name", kubecontainer.ContainerID{"docker", "dockerid2345"})
+	ip, err := plug.Status("namespace", "name", kubecontainer.ContainerID{Type: "docker", ID: "dockerid2345"})
 	if err != nil {
 		t.Errorf("Expected nil got %v", err)
 	}
@@ -320,7 +320,7 @@ func TestPluginStatusHookIPv6(t *testing.T) {
 		t.Errorf("InitNetworkPlugin() failed: %v", err)
 	}
-	ip, err := plug.Status("namespace", "name", kubecontainer.ContainerID{"docker", "dockerid2345"})
+	ip, err := plug.Status("namespace", "name", kubecontainer.ContainerID{Type: "docker", ID: "dockerid2345"})
 	if err != nil {
 		t.Errorf("Status() failed: %v", err)
 	}

View File

@@ -68,7 +68,7 @@ func (c *VersionCache) Get(key string) (kubecontainer.Version, kubecontainer.Ver
 	defer c.lock.RUnlock()
 	value, ok := c.cache[key]
 	if !ok {
-		return nil, nil, fmt.Errorf("Failed to get version info from cache by key: ", key)
+		return nil, nil, fmt.Errorf("Failed to get version info from cache by key: %v", key)
 	}
 	return value.apiVersion, value.version, nil
 }
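
Note: a variant of the printf diagnostic: the Errorf call above supplied key with no directive to receive it. A standalone sketch of before and after:

```go
package main

import "fmt"

func main() {
	key := "1.22"
	// Before: an argument with no directive; vet reports something like
	// "Errorf call has arguments but no formatting directives", and fmt
	// appends "%!(EXTRA string=1.22)" to the message at runtime.
	errBefore := fmt.Errorf("Failed to get version info from cache by key: ", key)
	// After: %v gives the argument a slot in the message.
	errAfter := fmt.Errorf("Failed to get version info from cache by key: %v", key)
	fmt.Println(errBefore)
	fmt.Println(errAfter)
}
```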

View File

@@ -282,8 +282,8 @@ func TestDeleteEndpointConnections(t *testing.T) {
 	}
 	serviceMap := make(map[proxy.ServicePortName]*serviceInfo)
-	svc1 := proxy.ServicePortName{types.NamespacedName{Namespace: "ns1", Name: "svc1"}, ""}
-	svc2 := proxy.ServicePortName{types.NamespacedName{Namespace: "ns1", Name: "svc2"}, ""}
+	svc1 := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: "svc1"}, Port: ""}
+	svc2 := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: "svc2"}, Port: ""}
 	serviceMap[svc1] = newFakeServiceInfo(svc1, net.IPv4(10, 20, 30, 40), api.ProtocolUDP)
 	serviceMap[svc2] = newFakeServiceInfo(svc1, net.IPv4(10, 20, 30, 41), api.ProtocolTCP)

View File

@@ -386,13 +386,13 @@ func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind s
 		break
 	case kindDeployment:
 		dpConfig := framework.DeploymentConfig{
-			rcConfig,
+			RCConfig: rcConfig,
 		}
 		framework.ExpectNoError(framework.RunDeployment(dpConfig))
 		break
 	case kindReplicaSet:
 		rsConfig := framework.ReplicaSetConfig{
-			rcConfig,
+			RCConfig: rcConfig,
 		}
 		framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
 		break

View File

@@ -478,11 +478,11 @@ var _ = framework.KubeDescribe("Density", func() {
 			node, ok := nodes[name]
 			Expect(ok).To(Equal(true))
-			scheduleLag = append(scheduleLag, framework.PodLatencyData{name, node, sched.Time.Sub(create.Time)})
-			startupLag = append(startupLag, framework.PodLatencyData{name, node, run.Time.Sub(sched.Time)})
-			watchLag = append(watchLag, framework.PodLatencyData{name, node, watch.Time.Sub(run.Time)})
-			schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{name, node, watch.Time.Sub(sched.Time)})
-			e2eLag = append(e2eLag, framework.PodLatencyData{name, node, watch.Time.Sub(create.Time)})
+			scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
+			startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
+			watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
+			schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
+			e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
 		}
 		sort.Sort(framework.LatencySlice(scheduleLag))

View File

@@ -566,7 +566,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]ap
 		// Keep trying...
 		return false, nil
 	})
-	Logf("WaitFor completed. Pods found = %v out of %v", timeout, len(pods), atLeast)
+	Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
 	return pods, err
 }
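
Note: vet's printf check also counts arguments. The Logf above passed three values to two verbs, so the fix adds a directive for the timeout rather than dropping an argument. A standalone sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	timeout := 10 * time.Second
	pods, atLeast := 2, 3
	// Before: three arguments for two verbs; timeout lands in the slot meant
	// for the pod count, atLeast is flagged as extra, and the output ends in
	// "%!(EXTRA int=3)".
	fmt.Printf("WaitFor completed. Pods found = %v out of %v\n", timeout, pods, atLeast)
	// After: a directive for each argument.
	fmt.Printf("WaitFor completed with timeout %v. Pods found = %v out of %v\n", timeout, pods, atLeast)
}
```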

View File

@@ -158,7 +158,7 @@ func (es *e2eService) startServer(cmd *healthCheckCommand) error {
 	go func() {
 		err := cmd.Run()
 		if err != nil {
-			cmdErrorChan <- fmt.Errorf("%s Failed with error \"%v\". Command output:\n%s", cmd, err, *cmd.OutputBuffer)
+			cmdErrorChan <- fmt.Errorf("%s Failed with error \"%v\". Command output:\n%v", cmd, err, *cmd.OutputBuffer)
 		}
 		close(cmdErrorChan)
 	}()
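
Note: the last fix is subtler than the others, assuming cmd.OutputBuffer is a *bytes.Buffer (an assumption; the dereference and the %s complaint point that way). bytes.Buffer declares String() on the pointer receiver, so the dereferenced value does not satisfy fmt.Stringer and vet rejects it as an argument for %s. A standalone sketch:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	buf := bytes.NewBufferString("command output here")
	// %v accepts any type, which satisfies vet; note it prints the buffer's
	// struct fields rather than its text.
	fmt.Printf("Command output:\n%v\n", *buf)
	// Passing the pointer instead would use the buffer's String method and
	// print the text itself.
	fmt.Printf("Command output:\n%s\n", buf)
}
```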