mirror of https://github.com/k3s-io/k3s

Correcting all go vet errors

parent 9fdd793555
commit 565189f5b8
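The changes below fall into a few recurring `go vet` fix patterns: printf-family calls with mismatched verbs, missing arguments, or a non-formatting variant where a formatting one was intended (`log.Println`/`t.Error` instead of `log.Printf`/`t.Errorf`); unkeyed composite literals for structs from other packages; malformed struct tags; and unreachable statements. As a minimal sketch of the printf-style fixes (the values here are hypothetical, chosen to mirror the `envInt` hunk below):

```go
package main

import "log"

func main() {
	key, def := "PORT", 8080 // hypothetical flag name and default

	// go vet's printf check flags this: Println does not interpret
	// format verbs, so "%q" would be printed literally.
	log.Println("invalid value for %q: using default: %q", key, def)

	// Fixed: the Printf variant applies the verbs to the arguments.
	log.Printf("invalid value for %q: using default: %q", key, def)
}
```

The same rule accounts for the many `t.Error` → `t.Errorf` and `log.Error` → `log.Errorf` rewrites in the hunks that follow: the non-`f` variants only concatenate their operands.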
@@ -12323,7 +12323,8 @@
 },
 "activeDeadlineSeconds": {
 "type": "integer",
-"format": "int64"
+"format": "int64",
+"description": "optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers; value must be a positive integer"
 },
 "dnsPolicy": {
 "type": "string",
@@ -265,5 +265,4 @@ func (s *CMServer) Run(_ []string) error {
 ).Run()

-select {}
 return nil
 }
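This hunk is one of several that fix vet's unreachable-code warning: a statement placed after something that can never fall through (an empty `select {}`, a `panic`, an infinite loop) can never execute. A sketch of the pattern, using a hypothetical function rather than the `CMServer` code itself:

```go
package main

import "fmt"

func run() error {
	select {}  // blocks forever: nothing after it can run
	return nil // go vet: unreachable code
}

func main() {
	fmt.Println(run())
}
```

Here the `select {}` was dropped so that `Run` actually returns; later hunks (`WaitForPending`, `modifyConfig`, `updateReplicaCount`) instead drop the dead `return` after a loop or `panic`.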
@@ -87,7 +87,7 @@ func (s *State) serveStatus(w http.ResponseWriter, r *http.Request) {
 fmt.Fprintf(w, "running")
 return
 }
-s.Logf("Declaring failure for %s/%s with %d sent and %d received and %d peers", *namespace, *service, s.Sent, s.Received, *peerCount)
+s.Logf("Declaring failure for %s/%s with %d sent and %d received and %d peers", *namespace, *service, len(s.Sent), len(s.Received), *peerCount)
 fmt.Fprintf(w, "fail")
 }

@@ -47,7 +47,7 @@ func envInt(key string, def int) int {
 if env := os.Getenv(key); env != "" {
 val, err := strconv.Atoi(env)
 if err != nil {
-log.Println("invalid value for %q: using default: %q", key, def)
+log.Printf("invalid value for %q: using default: %q", key, def)
 return def
 }
 return val
@@ -84,7 +84,7 @@ func main() {
 log.Fatalf("failed to resolve make.bash path: %v", err)
 }
 if _, err := os.Stat(makeBash); os.IsNotExist(err) {
-log.Fatalf("`make.bash` not found %q: %v", err)
+log.Fatalf("`make.bash` not found %q: %v", makeBash, err)
 }
 makeBashCmd := fmt.Sprintf("(cd %s; GOOS=linux GOARCH=amd64 ./make.bash --no-clean)", filepath.Dir(makeBash))
 log.Fatalf("`go %s` toolchain not found: %v, run: %q", "linux_amd64", crossErr, makeBashCmd)
@@ -127,7 +127,7 @@ func main() {

 imageIDBytes := make([]byte, 32)
 if _, err := rand.Read(imageIDBytes); err != nil {
-log.Fatalf("failed to generate ID: %v")
+log.Fatalf("failed to generate ID: %v", err)
 }
 imageID := hex.EncodeToString(imageIDBytes)
 repo := map[string]map[string]string{
@@ -214,7 +214,7 @@ func (ms *MinionServer) launchHyperkubeServer(server string, args *[]string, log
 }
 close(ch)
 if err := cmd.Wait(); err != nil {
-log.Error("%v exited with error: %v", server, err)
+log.Errorf("%v exited with error: %v", server, err)
 err = fmt.Errorf("%v exited with error: %v", server, err)
 return err
 }
@@ -76,16 +76,16 @@ func (mode RedirectMode) Redirect(nonblock, changemode bool, fd FileDescriptor,
 // Opens file in read-only, non-blocking mode. Returns a valid fd number if it succeeds, or -1 (and sets errno) if it fails.
 fdr, e2 := open(name, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
 if e2 != nil {
-return nil, &os.PathError{"open_read", name, e2}
+return nil, &os.PathError{Op: "open_read", Path: name, Err: e2}
 }
 fd2, e = open(name, flags, 0666)
 fd_close(fdr)
 }
 if e != nil {
-return nil, &os.PathError{"open", name, e}
+return nil, &os.PathError{Op: "open", Path: name, Err: e}
 }
 if e = fd_move(fd, fd2); e != nil {
-return nil, &os.PathError{"fd_move", name, e}
+return nil, &os.PathError{Op: "fd_move", Path: name, Err: e}
 }
 if changemode {
 if nonblock {
@@ -94,7 +94,7 @@ func (mode RedirectMode) Redirect(nonblock, changemode bool, fd FileDescriptor,
 e = ndelay_on(fd)
 }
 if e != nil {
-return nil, &os.PathError{"ndelay", name, e}
+return nil, &os.PathError{Op: "ndelay", Path: name, Err: e}
 }
 }
 return os.NewFile(uintptr(fd2), name), nil
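The `os.PathError` rewrites above show the commit's most common fix: vet warns (via its unkeyed-composites check) when a struct imported from another package is initialized positionally, since any upstream field reordering or insertion silently changes the meaning. The same keying is applied below to `watch.Event`, `kubelet.PodUpdate`, `cloudprovider.Route`, and many test fixtures. A minimal before/after sketch (hypothetical helper, not from the diff):

```go
package main

import (
	"fmt"
	"os"
)

func openErr(name string, err error) error {
	// Unkeyed: depends on PathError's field order (Op, Path, Err);
	// vet's composites check warns on this form for imported structs.
	_ = &os.PathError{"open", name, err}

	// Keyed: self-documenting and immune to field reordering.
	return &os.PathError{Op: "open", Path: name, Err: err}
}

func main() {
	fmt.Println(openErr("/tmp/missing", fmt.Errorf("no such file")))
}
```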
@@ -1,3 +1,5 @@
+// +build unit_test
+
 /*
 Copyright 2015 The Kubernetes Authors All rights reserved.

@@ -14,8 +16,6 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-// +build unit_test
-
 package service

 import (
@@ -125,11 +125,11 @@ type service struct {
 // loadBalancerConfig represents loadbalancer specific configuration. Eventually
 // kubernetes will have an api for l7 loadbalancing.
 type loadBalancerConfig struct {
-Name string `json: "name", description: Name of the load balancer, eg: haproxy.`
-ReloadCmd string `json: "reloadCmd", description: command used to reload the load balancer.`
-Config string `json: "config", description: path to loadbalancers configuration file.`
-Template string `json: "template", "template for the load balancer config."`
-Algorithm string `json: "algorithm", description: loadbalancing algorithm.`
+Name string `json:"name" description:"Name of the load balancer, eg: haproxy."`
+ReloadCmd string `json:"reloadCmd" description:"command used to reload the load balancer."`
+Config string `json:"config" description:"path to loadbalancers configuration file."`
+Template string `json:"template" description:"template for the load balancer config."`
+Algorithm string `json:"algorithm" description:"loadbalancing algorithm."`
 }

 // write writes the configuration file, will write to stdout if dryRun == true
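The tag rewrite above addresses vet's `structtag` check: a struct tag must be a space-separated sequence of `key:"value"` pairs, with no space after the colon and no comma separators, or `reflect.StructTag.Get` cannot parse it (and `encoding/json` silently ignores it). A small demonstration with a hypothetical struct mirroring the fix:

```go
package main

import (
	"fmt"
	"reflect"
)

type lbConfig struct {
	// Malformed: space after the colon and commas between pairs,
	// so Get("json") returns "" and vet reports bad tag syntax.
	Bad string `json: "bad", description: broken`
	// Canonical key:"value" pairs separated by single spaces.
	Good string `json:"good" description:"a parseable tag"`
}

func main() {
	t := reflect.TypeOf(lbConfig{})
	bad, _ := t.FieldByName("Bad")
	good, _ := t.FieldByName("Good")
	fmt.Printf("bad: %q\n", bad.Tag.Get("json"))   // ""
	fmt.Printf("good: %q\n", good.Tag.Get("json")) // "good"
}
```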
@@ -336,5 +336,4 @@ func WaitForPending(client *github.Client, user, project string, prNumber int) e
 glog.V(4).Info("PR is not pending, waiting for 30 seconds")
 time.Sleep(30 * time.Second)
 }
-return nil
 }
@@ -42,7 +42,7 @@ type Build struct {
 type Job struct {
 Result string `json:"result"`
 ID string `json:"id"`
-Timestamp int `json:timestamp`
+Timestamp int `json:"timestamp"`
 }

 func (j *JenkinsClient) request(path string) ([]byte, error) {
@@ -150,7 +150,7 @@ func LLENHandler(rw http.ResponseWriter, req *http.Request) {
 infoL := HandleError(pool.Get(0).Do("LLEN", "k8petstore")).(int64)
 fmt.Printf("=========== LLEN is %d ", infoL)
 lengthJSON := HandleError(json.MarshalIndent(infoL, "", " ")).([]byte)
-fmt.Printf("================ LLEN json is %s", infoL)
+fmt.Printf("================ LLEN json is %d", infoL)

 print("RETURN LEN = " + string(lengthJSON))
 rw.Write(lengthJSON)
@@ -54,8 +54,8 @@ func TestSemantic(t *testing.T) {
 {resource.Quantity{}, resource.MustParse("0"), true},
 {resource.Quantity{}, resource.MustParse("1m"), false},
 {
-resource.Quantity{inf.NewDec(5, 0), resource.BinarySI},
-resource.Quantity{inf.NewDec(5, 0), resource.DecimalSI},
+resource.Quantity{Amount: inf.NewDec(5, 0), Format: resource.BinarySI},
+resource.Quantity{Amount: inf.NewDec(5, 0), Format: resource.DecimalSI},
 true,
 },
 {resource.MustParse("2m"), resource.MustParse("1m"), false},
@@ -138,7 +138,7 @@ func TestAddToNodeAddresses(t *testing.T) {
 for i, tc := range testCases {
 AddToNodeAddresses(&tc.existing, tc.toAdd...)
 if !Semantic.DeepEqual(tc.expected, tc.existing) {
-t.Error("case[%d], expected: %v, got: %v", i, tc.expected, tc.existing)
+t.Errorf("case[%d], expected: %v, got: %v", i, tc.expected, tc.existing)
 }
 }
 }
@@ -33,10 +33,10 @@ func TestResourceHelpers(t *testing.T) {
 },
 }
 if res := resourceSpec.Limits.Cpu(); *res != cpuLimit {
-t.Errorf("expected cpulimit %d, got %d", cpuLimit, res)
+t.Errorf("expected cpulimit %v, got %v", cpuLimit, res)
 }
 if res := resourceSpec.Limits.Memory(); *res != memoryLimit {
-t.Errorf("expected memorylimit %d, got %d", memoryLimit, res)
+t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
 }
 resourceSpec = ResourceRequirements{
 Limits: ResourceList{
@@ -45,9 +45,9 @@ func TestResourceHelpers(t *testing.T) {
 },
 }
 if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {
-t.Errorf("expected cpulimit %d, got %d", 0, res)
+t.Errorf("expected cpulimit %v, got %v", 0, res)
 }
 if res := resourceSpec.Limits.Memory(); *res != memoryLimit {
-t.Errorf("expected memorylimit %d, got %d", memoryLimit, res)
+t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
 }
 }
@@ -358,14 +358,14 @@ func TestSetDefaulServiceTargetPort(t *testing.T) {
 obj := roundTrip(t, runtime.Object(in))
 out := obj.(*versioned.Service)
 if out.Spec.Ports[0].TargetPort != util.NewIntOrStringFromInt(1234) {
-t.Errorf("Expected TargetPort to be defaulted, got %s", out.Spec.Ports[0].TargetPort)
+t.Errorf("Expected TargetPort to be defaulted, got %v", out.Spec.Ports[0].TargetPort)
 }

 in = &versioned.Service{Spec: versioned.ServiceSpec{Ports: []versioned.ServicePort{{Port: 1234, TargetPort: util.NewIntOrStringFromInt(5678)}}}}
 obj = roundTrip(t, runtime.Object(in))
 out = obj.(*versioned.Service)
 if out.Spec.Ports[0].TargetPort != util.NewIntOrStringFromInt(5678) {
-t.Errorf("Expected TargetPort to be unchanged, got %s", out.Spec.Ports[0].TargetPort)
+t.Errorf("Expected TargetPort to be unchanged, got %v", out.Spec.Ports[0].TargetPort)
 }
 }

@@ -382,13 +382,13 @@ func TestSetDefaultServicePort(t *testing.T) {
 t.Errorf("Expected protocol %s, got %s", versioned.ProtocolUDP, out.Spec.Ports[0].Protocol)
 }
 if out.Spec.Ports[0].TargetPort != util.NewIntOrStringFromString("p") {
-t.Errorf("Expected port %d, got %s", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
+t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
 }
 if out.Spec.Ports[1].Protocol != versioned.ProtocolUDP {
 t.Errorf("Expected protocol %s, got %s", versioned.ProtocolUDP, out.Spec.Ports[1].Protocol)
 }
 if out.Spec.Ports[1].TargetPort != util.NewIntOrStringFromInt(309) {
-t.Errorf("Expected port %d, got %s", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
+t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
 }

 // Defaulted.
@@ -403,13 +403,13 @@ func TestSetDefaultServicePort(t *testing.T) {
 t.Errorf("Expected protocol %s, got %s", versioned.ProtocolTCP, out.Spec.Ports[0].Protocol)
 }
 if out.Spec.Ports[0].TargetPort != util.NewIntOrStringFromInt(in.Spec.Ports[0].Port) {
-t.Errorf("Expected port %d, got %d", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
+t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
 }
 if out.Spec.Ports[1].Protocol != versioned.ProtocolTCP {
 t.Errorf("Expected protocol %s, got %s", versioned.ProtocolTCP, out.Spec.Ports[1].Protocol)
 }
 if out.Spec.Ports[1].TargetPort != util.NewIntOrStringFromInt(in.Spec.Ports[1].Port) {
-t.Errorf("Expected port %d, got %d", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
+t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
 }
 }

@@ -873,7 +873,7 @@ type PodSpec struct {
 // a termination signal and the time when the processes are forcibly halted with a kill signal.
 // Set this value longer than the expected cleanup time for your process.
 TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" description:"optional duration in seconds the pod needs to terminate gracefully; may be decreased in delete request; value must be non-negative integer; the value zero indicates delete immediately; if this value is not set, the default grace period will be used instead; the grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal; set this value longer than the expected cleanup time for your process"`
-ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" description:"optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers; value must be a positive integer`
+ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" description:"optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers; value must be a positive integer"`
 // Optional: Set DNS policy. Defaults to "ClusterFirst"
 DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" description:"DNS policy for containers within the pod; one of 'ClusterFirst' or 'Default'"`
 // NodeSelector is a selector which must be true for the pod to fit on a node
@@ -448,16 +448,16 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {

 func TestValidateVolumes(t *testing.T) {
 successCase := []api.Volume{
-{Name: "abc", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{"/mnt/path1"}}},
-{Name: "123", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{"/mnt/path2"}}},
-{Name: "abc-123", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{"/mnt/path3"}}},
+{Name: "abc", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path1"}}},
+{Name: "123", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path2"}}},
+{Name: "abc-123", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path3"}}},
 {Name: "empty", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
-{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{"my-PD", "ext4", 1, false}}},
-{Name: "awsebs", VolumeSource: api.VolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{"my-PD", "ext4", 1, false}}},
-{Name: "gitrepo", VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{"my-repo", "hashstring"}}},
-{Name: "iscsidisk", VolumeSource: api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "iqn.2015-02.example.com:test", 1, "ext4", false}}},
-{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{"my-secret"}}},
-{Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"host1", "path", false}}},
+{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}},
+{Name: "awsebs", VolumeSource: api.VolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{VolumeID: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}},
+{Name: "gitrepo", VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{Repository: "my-repo", Revision: "hashstring"}}},
+{Name: "iscsidisk", VolumeSource: api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{TargetPortal: "127.0.0.1", IQN: "iqn.2015-02.example.com:test", Lun: 1, FSType: "ext4", ReadOnly: false}}},
+{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "my-secret"}}},
+{Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host1", Path: "path", ReadOnly: false}}},
 {Name: "rbd", VolumeSource: api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "bar", FSType: "ext4"}}},
 }
 names, errs := validateVolumes(successCase)
@@ -468,10 +468,10 @@ func TestValidateVolumes(t *testing.T) {
 t.Errorf("wrong names result: %v", names)
 }
 emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}
-emptyPortal := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"", "iqn.2015-02.example.com:test", 1, "ext4", false}}
-emptyIQN := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "", 1, "ext4", false}}
-emptyHosts := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"", "path", false}}
-emptyPath := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"host", "", false}}
+emptyPortal := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{TargetPortal: "", IQN: "iqn.2015-02.example.com:test", Lun: 1, FSType: "ext4", ReadOnly: false}}
+emptyIQN := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{TargetPortal: "127.0.0.1", IQN: "", Lun: 1, FSType: "ext4", ReadOnly: false}}
+emptyHosts := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "", Path: "path", ReadOnly: false}}
+emptyPath := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host", Path: "", ReadOnly: false}}
 emptyMon := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{}, RBDImage: "bar", FSType: "ext4"}}
 emptyImage := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "", FSType: "ext4"}}
 errorCases := map[string]struct {
@@ -1979,7 +1979,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
 RestartPolicy: api.RestartPolicyAlways,
 DNSPolicy: api.DNSClusterFirst,
 Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}},
-Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{"my-PD", "ext4", 1, false}}}},
+Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}},
 },
 },
 }
@@ -2140,7 +2140,7 @@ func TestValidateReplicationController(t *testing.T) {
 Labels: validSelector,
 },
 Spec: api.PodSpec{
-Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{"my-PD", "ext4", 1, false}}}},
+Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}},
 RestartPolicy: api.RestartPolicyAlways,
 DNSPolicy: api.DNSClusterFirst,
 Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}},
@@ -2361,7 +2361,7 @@ func TestValidateDaemonUpdate(t *testing.T) {
 Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}},
 }
 validPodSpecVolume := api.PodSpec{
-Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{"my-PD", "ext4", 1, false}}}},
+Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}},
 RestartPolicy: api.RestartPolicyAlways,
 DNSPolicy: api.DNSClusterFirst,
 Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}},
@@ -3182,7 +3182,7 @@ func TestValidateResourceNames(t *testing.T) {
 for i := range err {
 detail := err[i].(*errors.ValidationError).Detail
 if detail != "" && detail != qualifiedNameErrorMsg {
-t.Errorf("%s: expected error detail either empty or %s, got %s", k, qualifiedNameErrorMsg, detail)
+t.Errorf("%d: expected error detail either empty or %s, got %s", k, qualifiedNameErrorMsg, detail)
 }
 }
 }
@@ -195,7 +195,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
 versionedDeleterObject = indirectArbitraryPointer(objectPtr)
 isDeleter = true
 case isDeleter:
-gracefulDeleter = rest.GracefulDeleteAdapter{deleter}
+gracefulDeleter = rest.GracefulDeleteAdapter{Deleter: deleter}
 }

 versionedStatusPtr, err := a.group.Creater.New(serverVersion, "Status")
@@ -17,10 +17,11 @@ limitations under the License.
 package cache

 import (
-"k8s.io/kubernetes/pkg/util"
 "reflect"
 "testing"
 "time"
+
+"k8s.io/kubernetes/pkg/util"
 )

 func TestTTLExpirationBasic(t *testing.T) {
@@ -111,7 +112,7 @@ func TestTTLPolicy(t *testing.T) {
 exactlyOnTTL := fakeTime.Add(-ttl)
 expiredTime := fakeTime.Add(-(ttl + 1))

-policy := TTLPolicy{ttl, &util.FakeClock{fakeTime}}
+policy := TTLPolicy{ttl, &util.FakeClock{Time: fakeTime}}
 fakeTimestampedEntry := &timestampedEntry{obj: struct{}{}, timestamp: exactlyOnTTL}
 if policy.IsExpired(fakeTimestampedEntry) {
 t.Errorf("TTL cache should not expire entries exactly on ttl")
@@ -86,7 +86,7 @@ func (w *WatchCache) Add(obj interface{}) error {
 if err != nil {
 return err
 }
-event := watch.Event{watch.Added, object}
+event := watch.Event{Type: watch.Added, Object: object}

 f := func(obj runtime.Object) error { return w.store.Add(obj) }
 return w.processEvent(event, resourceVersion, f)
@@ -97,7 +97,7 @@ func (w *WatchCache) Update(obj interface{}) error {
 if err != nil {
 return err
 }
-event := watch.Event{watch.Modified, object}
+event := watch.Event{Type: watch.Modified, Object: object}

 f := func(obj runtime.Object) error { return w.store.Update(obj) }
 return w.processEvent(event, resourceVersion, f)
@@ -108,7 +108,7 @@ func (w *WatchCache) Delete(obj interface{}) error {
 if err != nil {
 return err
 }
-event := watch.Event{watch.Deleted, object}
+event := watch.Event{Type: watch.Deleted, Object: object}

 f := func(obj runtime.Object) error { return w.store.Delete(obj) }
 return w.processEvent(event, resourceVersion, f)
@@ -269,7 +269,7 @@ func TestGetListener(t *testing.T) {
 }

 if listener == nil {
-t.Errorf("Test case #%d did not raised an error (%t) but failed in initializing listener", i, err)
+t.Errorf("Test case #%d did not raise an error but failed in initializing listener", i)
 continue
 }

@@ -600,8 +600,6 @@ func (r *Request) Stream() (io.ReadCloser, error) {
 bodyText := string(bodyBytes)
 return nil, fmt.Errorf("%s while accessing %v: %s", resp.Status, url, bodyText)
 }
-
 return resp.Body, nil
 }
-
 // Upgrade upgrades the request so that it supports multiplexed bidirectional
@@ -1187,7 +1187,7 @@ func TestWatch(t *testing.T) {

 encoder := watchjson.NewEncoder(w, latest.Codec)
 for _, item := range table {
-if err := encoder.Encode(&watch.Event{item.t, item.obj}); err != nil {
+if err := encoder.Encode(&watch.Event{Type: item.t, Object: item.obj}); err != nil {
 panic(err)
 }
 flusher.Flush()
@@ -76,8 +76,6 @@ func ObjectReaction(o ObjectRetriever, mapper meta.RESTMapper) ReactionFunc {
 default:
 return nil, fmt.Errorf("no reaction implemented for %s", action)
 }
-
-return nil, nil
 }
 }

@@ -175,7 +173,7 @@ func (o objects) Kind(kind, name string) (runtime.Object, error) {
 status.Details.Kind = kind
 }
 if status.Status != api.StatusSuccess {
-return nilValue, &errors.StatusError{*status}
+return nilValue, &errors.StatusError{ErrStatus: *status}
 }
 }

@@ -18,6 +18,7 @@ package aws_cloud

 import (
+"fmt"

 "github.com/aws/aws-sdk-go/aws"
 "github.com/aws/aws-sdk-go/service/ec2"
 "k8s.io/kubernetes/pkg/cloudprovider"
@@ -68,7 +69,7 @@ func (s *AWSCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error
 }
 instanceName := orEmpty(instance.PrivateDNSName)
 routeName := clusterName + "-" + destinationCIDR
-routes = append(routes, &cloudprovider.Route{routeName, instanceName, destinationCIDR})
+routes = append(routes, &cloudprovider.Route{Name: routeName, TargetInstance: instanceName, DestinationCIDR: destinationCIDR})
 }

 return routes, nil
@@ -476,12 +476,12 @@ func (gce *GCECloud) UpdateTCPLoadBalancer(name, region string, hosts []string)
 for _, host := range hosts {
 link := makeComparableHostPath(gce.zone, host)
 if !existing.Has(link) {
-toAdd = append(toAdd, &compute.InstanceReference{link})
+toAdd = append(toAdd, &compute.InstanceReference{Instance: link})
 }
 existing.Delete(link)
 }
 for link := range existing {
-toRemove = append(toRemove, &compute.InstanceReference{link})
+toRemove = append(toRemove, &compute.InstanceReference{Instance: link})
 }

 if len(toAdd) > 0 {
@@ -778,7 +778,7 @@ func (gce *GCECloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, err
 }

 target := path.Base(r.NextHopInstance)
-routes = append(routes, &cloudprovider.Route{r.Name, target, r.DestRange})
+routes = append(routes, &cloudprovider.Route{Name: r.Name, TargetInstance: target, DestinationCIDR: r.DestRange})
 }
 return routes, nil
 }
@@ -39,8 +39,8 @@ import (
 // NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
 func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *util.FakeClock) {
 fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
-fakeClock := &util.FakeClock{fakeTime}
-ttlPolicy := &cache.TTLPolicy{ttl, fakeClock}
+fakeClock := &util.FakeClock{Time: fakeTime}
+ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
 ttlStore := cache.NewFakeExpirationStore(
 ExpKeyFunc, nil, ttlPolicy, fakeClock)
 return &ControllerExpectations{ttlStore}, fakeClock
@@ -52,40 +52,40 @@ type nnu struct {
 // Add adds an object to the set and sends an add event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) Add(obj runtime.Object) {
-f.Change(watch.Event{watch.Added, obj}, 1)
+f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
 }

 // Modify updates an object in the set and sends a modified event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) Modify(obj runtime.Object) {
-f.Change(watch.Event{watch.Modified, obj}, 1)
+f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
 }

 // Delete deletes an object from the set and sends a delete event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
-f.Change(watch.Event{watch.Deleted, lastValue}, 1)
+f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
 }

 // AddDropWatch adds an object to the set but forgets to send an add event to
 // watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
-f.Change(watch.Event{watch.Added, obj}, 0)
+f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
 }

 // ModifyDropWatch updates an object in the set but forgets to send a modify
 // event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
-f.Change(watch.Event{watch.Modified, obj}, 0)
+f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
 }

 // DeleteDropWatch deletes an object from the set but forgets to send a delete
 // event to watchers.
 // obj's ResourceVersion is set.
 func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
-f.Change(watch.Event{watch.Deleted, lastValue}, 0)
+f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
 }

 func (f *FakeControllerSource) key(meta *api.ObjectMeta) nnu {
@@ -170,7 +170,7 @@ func (f *FakeControllerSource) Watch(resourceVersion string) (watch.Interface, e
 if err != nil {
 return nil, err
 }
-changes = append(changes, watch.Event{c.Type, objCopy.(runtime.Object)})
+changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)})
 }
 return f.broadcaster.WatchWithPrefix(changes), nil
 } else if rc > len(f.changes) {
@@ -479,7 +479,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
 Status: api.ConditionUnknown,
 Reason: fmt.Sprintf("Kubelet stopped posting node status."),
 LastHeartbeatTime: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
-LastTransitionTime: util.Time{util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
+LastTransitionTime: util.Time{Time: util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
 },
 },
 Capacity: api.ResourceList{
@@ -234,7 +234,7 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
 // volumes are removed by processes external to this binder and must be removed from the cluster
 case api.VolumeFailed:
 if volume.Spec.ClaimRef == nil {
-return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume)
+return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
 } else {
 glog.V(5).Infof("PersistentVolume[%s] previously failed recycling. Skipping.\n", volume.Name)
 }
@@ -183,7 +183,7 @@ func TestBindingWithExamples(t *testing.T) {
 pv, err := client.PersistentVolumes().Get("any")
 pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle
 if err != nil {
-t.Error("Unexpected error getting PV from client: %v", err)
+t.Errorf("Unexpected error getting PV from client: %v", err)
 }

 claim, error := client.PersistentVolumeClaims("ns").Get("any")
@@ -285,7 +285,7 @@ func TestMissingFromIndex(t *testing.T) {

 pv, err := client.PersistentVolumes().Get("any")
 if err != nil {
-t.Error("Unexpected error getting PV from client: %v", err)
+t.Errorf("Unexpected error getting PV from client: %v", err)
 }

 claim, error := client.PersistentVolumeClaims("ns").Get("any")
@@ -200,7 +200,7 @@ func TestSort(t *testing.T) {

 for i, expected := range []string{"gce-pd-1", "gce-pd-5", "gce-pd-10"} {
 if string(volumes[i].UID) != expected {
-t.Error("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
+t.Errorf("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
 }
 }

@@ -211,7 +211,7 @@ func TestSort(t *testing.T) {

 for i, expected := range []string{"nfs-1", "nfs-5", "nfs-10"} {
 if string(volumes[i].UID) != expected {
-t.Error("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
+t.Errorf("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
 }
 }
 }
@@ -135,7 +135,7 @@ func (recycler *PersistentVolumeRecycler) handleRecycle(pv *api.PersistentVolume
 // blocks until completion
 err = volRecycler.Recycle()
 if err != nil {
-glog.Errorf("PersistentVolume[%s] failed recycling: %+v", err)
+glog.Errorf("PersistentVolume[%s] failed recycling: %+v", pv.Name, err)
 pv.Status.Message = fmt.Sprintf("Recycling error: %s", err)
 nextPhase = api.VolumeFailed
 } else {
@@ -177,7 +177,7 @@ func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *Repl
 func (rm *ReplicationManager) SetEventRecorder(recorder record.EventRecorder) {
 // TODO: Hack. We can't cleanly shutdown the event recorder, so benchmarks
 // need to pass in a fake.
-rm.podControl = controller.RealPodControl{rm.kubeClient, recorder}
+rm.podControl = controller.RealPodControl{KubeClient: rm.kubeClient, Recorder: recorder}
 }

 // Run begins watching and syncing.
@@ -689,7 +689,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 return &api.ReplicationController{}, fmt.Errorf("Fake error")
 },
 }
-fakeRCClient := &testclient.FakeReplicationControllers{fakeClient, "default"}
+fakeRCClient := &testclient.FakeReplicationControllers{Fake: fakeClient, Namespace: "default"}
 numReplicas := 10
 updateReplicaCount(fakeRCClient, *rc, numReplicas)
 updates, gets := 0, 0
@@ -54,8 +54,6 @@ func updateReplicaCount(rcClient client.ReplicationControllerInterface, controll
 return getErr
 }
 }
-// Failed 2 updates one of which was with the latest controller, return the update error
-return
 }

 // OverlappingControllers sorts a list of controllers by creation timestamp, using their names as a tie breaker.
@@ -213,7 +213,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
 }
 service = cachedService.lastState
 delta.Object = cachedService.lastState
-namespacedName = types.NamespacedName{service.Namespace, service.Name}
+namespacedName = types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
 } else {
 namespacedName.Namespace = service.Namespace
 namespacedName.Name = service.Name
@@ -659,7 +659,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,

 // It's only an actual error if the load balancer still exists.
 if _, exists, err := s.balancer.GetTCPLoadBalancer(name, s.zone.Region); err != nil {
-glog.Errorf("External error while checking if TCP load balancer %q exists: name, %v")
+glog.Errorf("External error while checking if TCP load balancer %q exists: name, %v", name, err)
 } else if !exists {
 return nil
 }
@@ -95,7 +95,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
 controller.init()
 cloud.Calls = nil     // ignore any cloud calls made in init()
 client.ClearActions() // ignore any client calls made in init()
-err, _ := controller.createLoadBalancerIfNeeded(types.NamespacedName{"foo", "bar"}, item.service, nil)
+err, _ := controller.createLoadBalancerIfNeeded(types.NamespacedName{Namespace: "foo", Name: "bar"}, item.service, nil)
 if !item.expectErr && err != nil {
 t.Errorf("unexpected error: %v", err)
 } else if item.expectErr && err == nil {
@@ -58,7 +58,7 @@ func verifyDeepCopyFunctionSignature(ft reflect.Type) error {
 return fmt.Errorf("expected func, got: %v", ft)
 }
 if ft.NumIn() != 3 {
-return fmt.Errorf("expected three 'in' params, got $v", ft)
+return fmt.Errorf("expected three 'in' params, got %v", ft)
 }
 if ft.NumOut() != 1 {
 return fmt.Errorf("expected one 'out' param, got %v", ft)
@@ -247,7 +247,7 @@ func (s *scope) describe() (src, dest string) {

 // error makes an error that includes information about where we were in the objects
 // we were asked to convert.
-func (s *scope) error(message string, args ...interface{}) error {
+func (s *scope) errorf(message string, args ...interface{}) error {
 srcPath, destPath := s.describe()
 where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath)
 return fmt.Errorf(where+message, args...)
@@ -473,7 +473,7 @@ func (c *Converter) callCustom(sv, dv, custom reflect.Value, scope *scope) error
 }
 if !dv.CanAddr() {
 if !dv.CanSet() {
-return scope.error("can't addr or set dest.")
+return scope.errorf("can't addr or set dest.")
 }
 dvOrig := dv
 dv := reflect.New(dvOrig.Type())
@@ -527,11 +527,11 @@ func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error {
 dt, st := dv.Type(), sv.Type()

 if !dv.CanSet() {
-return scope.error("Cannot set dest. (Tried to deep copy something with unexported fields?)")
+return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)")
 }

 if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) {
-return scope.error(
+return scope.errorf(
 "type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.",
 c.nameFunc(st), c.nameFunc(dt), st, dt)
 }
@@ -620,7 +620,7 @@ func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error {
 dv.Set(reflect.ValueOf(tmpdv.Interface()))
 return nil
 default:
-return scope.error("couldn't copy '%v' into '%v'; didn't understand types", st, dt)
+return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt)
 }
 return nil
 }
@@ -762,9 +762,9 @@ func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error {
 case scope.flags.IsSet(IgnoreMissingFields):
 // No error.
 case scope.flags.IsSet(SourceToDest):
-return scope.error("%v not present in dest", dkey)
+return scope.errorf("%v not present in dest", dkey)
 default:
-return scope.error("%v not present in src", skey)
+return scope.errorf("%v not present in src", skey)
 }
 continue
 }
@@ -427,7 +427,7 @@ func TestIsDefaultRegistryMatch(t *testing.T) {
 for _, sample := range samples {
 for expected, imageName := range sample {
 if got := isDefaultRegistryMatch(imageName); got != expected {
-t.Errorf("Expected '%s' to be %s, got %s", imageName, expected, got)
+t.Errorf("Expected '%s' to be %t, got %t", imageName, expected, got)
 }
 }
 }
@@ -267,7 +267,7 @@ func ExamplePrintReplicationControllerWithNamespace() {
 Name: "foo",
 Namespace: "beep",
 Labels: map[string]string{"foo": "bar"},
-CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
+CreationTimestamp: util.Time{Time: time.Now().AddDate(-10, 0, 0)},
 },
 Spec: api.ReplicationControllerSpec{
 Replicas: 1,
@@ -308,7 +308,7 @@ func ExamplePrintPodWithWideFormat() {
 pod := &api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "test1",
-CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
+CreationTimestamp: util.Time{Time: time.Now().AddDate(-10, 0, 0)},
 },
 Spec: api.PodSpec{
 Containers: make([]api.Container, 2),
@@ -345,7 +345,7 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 ObjectMeta: api.ObjectMeta{
 Name: "svc1",
 Namespace: "ns1",
-CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
+CreationTimestamp: util.Time{Time: time.Now().AddDate(-10, 0, 0)},
 Labels: map[string]string{
 "l1": "value",
 },
@@ -366,7 +366,7 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 ObjectMeta: api.ObjectMeta{
 Name: "svc2",
 Namespace: "ns2",
-CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
+CreationTimestamp: util.Time{Time: time.Now().AddDate(-10, 0, 0)},
 Labels: map[string]string{
 "l1": "dolla-bill-yall",
 },
@@ -203,5 +203,4 @@ func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue stri
 }

 panic(fmt.Errorf("Unrecognized type: %v", actualCurrValue))
-return nil
 }
@@ -187,7 +187,7 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str

 // TODO: extract this flag to a central location, when such a location exists.
 if !cmdutil.GetFlagBool(cmd, "dry-run") {
-resourceMapper := &resource.Mapper{typer, mapper, f.ClientMapperForCommand()}
+resourceMapper := &resource.Mapper{ObjectTyper: typer, RESTMapper: mapper, ClientMapper: f.ClientMapperForCommand()}
 info, err := resourceMapper.InfoForObject(object)
 if err != nil {
 return err
@@ -85,7 +85,7 @@ func TestValidateArgs(t *testing.T) {
 t.Errorf("unexpected error: %v (%s)", err, test.testName)
 }
 if err == nil && test.expectErr {
-t.Error("unexpected non-error (%s)", test.testName)
+t.Errorf("unexpected non-error (%s)", test.testName)
 }
 }
 }
@@ -121,7 +121,7 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri
 if currentSize != -1 && len(infos) > 1 {
 return fmt.Errorf("cannot use --current-replicas with multiple controllers")
 }
-precondition := &kubectl.ScalePrecondition{currentSize, resourceVersion}
+precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion}
 retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
 var waitForReplicas *kubectl.RetryParams
 if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 {
@@ -90,7 +90,7 @@ type Factory struct {
 // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
 // if optionalClientConfig is not nil, then this factory will make use of it.
 func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
-mapper := kubectl.ShortcutExpander{latest.RESTMapper}
+mapper := kubectl.ShortcutExpander{RESTMapper: latest.RESTMapper}

 flags := pflag.NewFlagSet("", pflag.ContinueOnError)
 flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags
@@ -119,7 +119,7 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
 CheckErr(err)
 cmdApiVersion := cfg.Version

-return kubectl.OutputVersionMapper{mapper, cmdApiVersion}, api.Scheme
+return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme
 },
 Client: func() (*client.Client, error) {
 return clients.ClientForVersion("")
@@ -55,7 +55,7 @@ func AddSourceToErr(verb string, source string, err error) error {
 if statusError, ok := err.(*errors.StatusError); ok {
 status := statusError.Status()
 status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message)
-return &errors.StatusError{status}
+return &errors.StatusError{ErrStatus: status}
 }
 return fmt.Errorf("error when %s %q: %v", verb, source, err)
 }
@@ -211,7 +211,7 @@ func getFlag(cmd *cobra.Command, flag string) *pflag.Flag {
 func GetFlagString(cmd *cobra.Command, flag string) string {
 s, err := cmd.Flags().GetString(flag)
 if err != nil {
-glog.Fatalf("err %v accessing flag %s for command %s: %s", err, flag, cmd.Name())
+glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err)
 }
 return s
 }
@@ -220,7 +220,7 @@ func GetFlagString(cmd *cobra.Command, flag string) string {
 func GetFlagStringSlice(cmd *cobra.Command, flag string) []string {
 s, err := cmd.Flags().GetStringSlice(flag)
 if err != nil {
-glog.Fatalf("err %v accessing flag %s for command %s: %s", err, flag, cmd.Name())
+glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err)
 }
 return s
 }
@@ -237,7 +237,7 @@ func GetWideFlag(cmd *cobra.Command) bool {
 func GetFlagBool(cmd *cobra.Command, flag string) bool {
 b, err := cmd.Flags().GetBool(flag)
 if err != nil {
-glog.Fatalf("err %v accessing flag %s for command %s: %s", err, flag, cmd.Name())
+glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err)
 }
 return b
 }
@@ -246,7 +246,7 @@ func GetFlagBool(cmd *cobra.Command, flag string) bool {
 func GetFlagInt(cmd *cobra.Command, flag string) int {
 i, err := cmd.Flags().GetInt(flag)
 if err != nil {
-glog.Fatalf("err: %v accessing flag %s for command %s: %s", err, flag, cmd.Name())
+glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err)
 }
 return i
 }
@@ -254,7 +254,7 @@ func GetFlagInt(cmd *cobra.Command, flag string) int {
 func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration {
 d, err := cmd.Flags().GetDuration(flag)
 if err != nil {
-glog.Fatalf("err: %v accessing flag %s for command %s: %s", err, flag, cmd.Name())
+glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err)
 }
 return d
 }
@@ -314,7 +314,7 @@ func TestDumpReaderToFile(t *testing.T) {
 }
 data, err := ioutil.ReadFile(tempFile.Name())
 if err != nil {
-t.Errorf("error when reading %s: %v", tempFile, err)
+t.Errorf("error when reading %s: %v", tempFile.Name(), err)
 }
 stringData := string(data)
 if stringData != testString {
@@ -452,8 +452,8 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string) (string, er
 } else {
 fmt.Fprintf(out, "Claim:\t%s\n", "")
 }
-fmt.Fprintf(out, "Reclaim Policy:\t%d\n", pv.Spec.PersistentVolumeReclaimPolicy)
-fmt.Fprintf(out, "Message:\t%d\n", pv.Status.Message)
+fmt.Fprintf(out, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy)
+fmt.Fprintf(out, "Message:\t%s\n", pv.Status.Message)
 return nil
 })
 }
@@ -473,8 +473,8 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string) (strin
 return tabbedString(func(out io.Writer) error {
 fmt.Fprintf(out, "Name:\t%s\n", pvc.Name)
 fmt.Fprintf(out, "Namespace:\t%s\n", pvc.Namespace)
-fmt.Fprintf(out, "Status:\t%d\n", pvc.Status.Phase)
-fmt.Fprintf(out, "Volume:\t%d\n", pvc.Spec.VolumeName)
+fmt.Fprintf(out, "Status:\t%v\n", pvc.Status.Phase)
+fmt.Fprintf(out, "Volume:\t%s\n", pvc.Spec.VolumeName)

 return nil
 })
@@ -377,7 +377,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 retry := &RetryParams{interval, timeout}
 waitForReplicas := &RetryParams{interval, timeout}
 if newRc.Spec.Replicas <= 0 {
-return fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %s\n", newName, newRc.Spec)
+return fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", newName, newRc.Spec.Replicas)
 }
 desired := newRc.Spec.Replicas
 sourceId := fmt.Sprintf("%s:%s", oldName, oldRc.ObjectMeta.UID)
@@ -84,7 +84,7 @@ type reaperFake struct {
 }

 func (c *reaperFake) Pods(namespace string) client.PodInterface {
-pods := &testclient.FakePods{c.Fake, namespace}
+pods := &testclient.FakePods{Fake: c.Fake, Namespace: namespace}
 if c.noSuchPod {
 return &noSuchPod{pods}
 }
@@ -92,7 +92,7 @@ func (c *reaperFake) Pods(namespace string) client.PodInterface {
 }

 func (c *reaperFake) Services(namespace string) client.ServiceInterface {
-services := &testclient.FakeServices{c.Fake, namespace}
+services := &testclient.FakeServices{Fake: c.Fake, Namespace: namespace}
 if c.noDeleteService {
 return &noDeleteService{services}
 }
@@ -38,7 +38,7 @@ func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}
 for _, o := range objs {
 pods = append(pods, o.(*api.Pod))
 }
-updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.ApiserverSource}
+updates <- kubelet.PodUpdate{Pods: pods, Op: kubelet.SET, Source: kubelet.ApiserverSource}
 }
 cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run()
 }
@@ -170,12 +170,12 @@ func (s *podStorage) Merge(source string, change interface{}) error {
 s.updates <- *updates
 }
 if len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
-s.updates <- kubelet.PodUpdate{s.MergedState().([]*api.Pod), kubelet.SET, source}
+s.updates <- kubelet.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubelet.SET, Source: source}
 }

 case PodConfigNotificationSnapshot:
 if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
-s.updates <- kubelet.PodUpdate{s.MergedState().([]*api.Pod), kubelet.SET, source}
+s.updates <- kubelet.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubelet.SET, Source: source}
 }

 default:
@@ -339,7 +339,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco
 func (s *podStorage) Sync() {
 s.updateLock.Lock()
 defer s.updateLock.Unlock()
-s.updates <- kubelet.PodUpdate{s.MergedState().([]*api.Pod), kubelet.SET, kubelet.AllSource}
+s.updates <- kubelet.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubelet.SET, Source: kubelet.AllSource}
 }

 // Object implements config.Accessor
@@ -66,7 +66,7 @@ func (s *sourceFile) extractFromPath() error {
 return err
 }
 // Emit an update with an empty PodList to allow FileSource to be marked as seen
-s.updates <- kubelet.PodUpdate{[]*api.Pod{}, kubelet.SET, kubelet.FileSource}
+s.updates <- kubelet.PodUpdate{Pods: []*api.Pod{}, Op: kubelet.SET, Source: kubelet.FileSource}
 return fmt.Errorf("path does not exist, ignoring")
 }

@@ -76,14 +76,14 @@ func (s *sourceFile) extractFromPath() error {
 if err != nil {
 return err
 }
-s.updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.FileSource}
+s.updates <- kubelet.PodUpdate{Pods: pods, Op: kubelet.SET, Source: kubelet.FileSource}

 case statInfo.Mode().IsRegular():
 pod, err := s.extractFromFile(path)
 if err != nil {
 return err
 }
-s.updates <- kubelet.PodUpdate{[]*api.Pod{pod}, kubelet.SET, kubelet.FileSource}
+s.updates <- kubelet.PodUpdate{Pods: []*api.Pod{pod}, Op: kubelet.SET, Source: kubelet.FileSource}

 default:
 return fmt.Errorf("path is not a directory or file")
@@ -95,7 +95,7 @@ func (s *sourceURL) extractFromURL() error {
 }
 if len(data) == 0 {
 // Emit an update with an empty PodList to allow HTTPSource to be marked as seen
-s.updates <- kubelet.PodUpdate{[]*api.Pod{}, kubelet.SET, kubelet.HTTPSource}
+s.updates <- kubelet.PodUpdate{Pods: []*api.Pod{}, Op: kubelet.SET, Source: kubelet.HTTPSource}
 return fmt.Errorf("zero-length data received from %v", s.url)
 }
 // Short circuit if the data has not changed since the last time it was read.
@@ -111,7 +111,7 @@ func (s *sourceURL) extractFromURL() error {
 // It parsed but could not be used.
 return singlePodErr
 }
-s.updates <- kubelet.PodUpdate{[]*api.Pod{pod}, kubelet.SET, kubelet.HTTPSource}
+s.updates <- kubelet.PodUpdate{Pods: []*api.Pod{pod}, Op: kubelet.SET, Source: kubelet.HTTPSource}
 return nil
 }

@@ -126,7 +126,7 @@ func (s *sourceURL) extractFromURL() error {
 for i := range podList.Items {
 pods = append(pods, &podList.Items[i])
 }
-s.updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.HTTPSource}
+s.updates <- kubelet.PodUpdate{Pods: pods, Op: kubelet.SET, Source: kubelet.HTTPSource}
 return nil
 }

@@ -247,7 +247,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 var versionedPods runtime.Object
 err := testapi.Converter().Convert(&testCase.pods, &versionedPods)
 if err != nil {
-t.Fatalf("error in versioning the pods: %s", testCase.desc, err)
+t.Fatalf("%s: error in versioning the pods: %s", testCase.desc, err)
 }
 data, err := testapi.Codec().Encode(versionedPods)
 if err != nil {
@@ -38,7 +38,7 @@ func TestEnvVarsToMap(t *testing.T) {
 varMap := EnvVarsToMap(vars)

 if e, a := len(vars), len(varMap); e != a {
-t.Error("Unexpected map length; expected: %v, got %v", e, a)
+t.Errorf("Unexpected map length; expected: %d, got %d", e, a)
 }

 if a := varMap["foo"]; a != "bar" {
@@ -269,7 +269,7 @@ func TestPullWithJSONError(t *testing.T) {
 }
 err := puller.Pull(test.imageName, []api.Secret{})
 if err == nil || !strings.Contains(err.Error(), test.expectedError) {
-t.Errorf("%d: expect error %s, got : %s", i, test.expectedError, err)
+t.Errorf("%s: expect error %s, got : %s", i, test.expectedError, err)
 continue
 }
 }
@@ -288,7 +288,7 @@ func (f *FakeDockerClient) CreateExec(opts docker.CreateExecOptions) (*docker.Ex
 defer f.Unlock()
 f.execCmd = opts.Cmd
 f.called = append(f.called, "create_exec")
-return &docker.Exec{"12345678"}, nil
+return &docker.Exec{ID: "12345678"}, nil
 }

 func (f *FakeDockerClient) StartExec(_ string, _ docker.StartExecOptions) error {
@@ -588,7 +588,7 @@ func (dm *DockerManager) runContainer(
 if len(containerHostname) > hostnameMaxLen {
 containerHostname = containerHostname[:hostnameMaxLen]
 }
-namespacedName := types.NamespacedName{pod.Namespace, pod.Name}
+namespacedName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}
 labels := map[string]string{
 "io.kubernetes.pod.name": namespacedName.String(),
 }
@@ -1041,7 +1041,7 @@ func (dm *DockerManager) ExecInContainer(containerId string, cmd []string, stdin
 return err
 }
 if !container.State.Running {
-return fmt.Errorf("container not running (%s)", container)
+return fmt.Errorf("container not running (%s)", container.ID)
 }

 return dm.execHandler.ExecInContainer(dm.client, container, cmd, stdin, stdout, stderr, tty)
@@ -1086,7 +1086,7 @@ func (dm *DockerManager) PortForward(pod *kubecontainer.Pod, port uint16, stream
 }

 if !container.State.Running {
-return fmt.Errorf("container not running (%s)", container)
+return fmt.Errorf("container not running (%s)", container.ID)
 }

 containerPid := container.State.Pid
@@ -1531,7 +1531,7 @@ func (dm *DockerManager) pullImage(pod *api.Pod, container *api.Container, pullS
 if err != nil {
 glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
 }
-spec := kubecontainer.ImageSpec{container.Image}
+spec := kubecontainer.ImageSpec{Image: container.Image}
 present, err := dm.IsImagePresent(spec)
 if err != nil {
 if ref != nil {
@@ -487,7 +487,7 @@ func TestKillContainerInPodWithPreStop(t *testing.T) {
 }
 podString, err := testapi.Codec().Encode(pod)
 if err != nil {
-t.Errorf("unexpected error: %v")
+t.Errorf("unexpected error: %v", err)
 }
 containers := []docker.APIContainers{
 {
@@ -838,7 +838,7 @@ func TestProbeContainer(t *testing.T) {
 }
 result, err := manager.prober.Probe(&api.Pod{}, api.PodStatus{}, test.testContainer, dc.ID, dc.Created)
 if test.expectError && err == nil {
-t.Error("[%d] Expected error but no error was returned.", i)
+t.Errorf("[%d] Expected error but no error was returned.", i)
 }
 if !test.expectError && err != nil {
 t.Errorf("[%d] Didn't expect error but got: %v", i, err)
@@ -177,7 +177,7 @@ func NewMainKubelet(
 		}
 		cache.NewReflector(listWatch, &api.Service{}, serviceStore, 0).Run()
 	}
-	serviceLister := &cache.StoreToServiceLister{serviceStore}
+	serviceLister := &cache.StoreToServiceLister{Store: serviceStore}
 
 	nodeStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
 	if kubeClient != nil {
@@ -194,7 +194,7 @@ func NewMainKubelet(
 		}
 		cache.NewReflector(listWatch, &api.Node{}, nodeStore, 0).Run()
 	}
-	nodeLister := &cache.StoreToNodeLister{nodeStore}
+	nodeLister := &cache.StoreToNodeLister{Store: nodeStore}
 
 	// TODO: get the real minion object of ourself,
 	// and use the real minion name and UID.
@@ -1234,7 +1234,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 		}
 
 		podStatus = pod.Status
-		podStatus.StartTime = &util.Time{start}
+		podStatus.StartTime = &util.Time{Time: start}
 		kl.statusManager.SetPodStatus(pod, podStatus)
 		glog.V(3).Infof("Not generating pod status for new pod %q", podFullName)
 	} else {

@@ -390,7 +390,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
 func TestMountExternalVolumes(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kubelet := testKubelet.kubelet
-	kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{&volume.FakeVolumePlugin{"fake", nil}}, &volumeHost{kubelet})
+	kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{&volume.FakeVolumePlugin{PluginName: "fake", Host: nil}}, &volumeHost{kubelet})
 
 	pod := api.Pod{
 		ObjectMeta: api.ObjectMeta{
@@ -425,7 +425,7 @@ func TestMountExternalVolumes(t *testing.T) {
 func TestGetPodVolumesFromDisk(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kubelet := testKubelet.kubelet
-	plug := &volume.FakeVolumePlugin{"fake", nil}
+	plug := &volume.FakeVolumePlugin{PluginName: "fake", Host: nil}
 	kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plug}, &volumeHost{kubelet})
 
 	volsOnDisk := []struct {
@@ -439,7 +439,7 @@ func TestGetPodVolumesFromDisk(t *testing.T) {
 
 	expectedPaths := []string{}
 	for i := range volsOnDisk {
-		fv := volume.FakeVolume{volsOnDisk[i].podUID, volsOnDisk[i].volName, plug}
+		fv := volume.FakeVolume{PodUID: volsOnDisk[i].podUID, VolName: volsOnDisk[i].volName, Plugin: plug}
 		fv.SetUp()
 		expectedPaths = append(expectedPaths, fv.GetPath())
 	}
@@ -3140,7 +3140,7 @@ func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
 	podFullName := kubecontainer.GetPodFullName(pods[0])
 	status, found := kubelet.statusManager.GetPodStatus(podFullName)
 	if !found {
-		t.Errorf("expected to found status for pod %q", status)
+		t.Errorf("expected to found status for pod %q", podFullName)
 	}
 	if status.Phase != api.PodFailed {
 		t.Fatalf("expected pod status %q, ot %q.", api.PodFailed, status.Phase)
@@ -3195,7 +3195,7 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
 	podFullName := kubecontainer.GetPodFullName(pods[0])
 	status, found := kubelet.statusManager.GetPodStatus(podFullName)
 	if !found {
-		t.Errorf("expected to found status for pod %q", status)
+		t.Errorf("expected to found status for pod %q", podFullName)
 	}
 	if status.Phase == api.PodFailed {
 		t.Fatalf("expected pod status to not be %q", status.Phase)

@@ -63,7 +63,7 @@ func (ow *realOOMWatcher) Start(ref *api.ObjectReference) error {
 
 		for event := range eventChannel.GetChannel() {
 			glog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
-			ow.recorder.PastEventf(ref, util.Time{event.Timestamp}, systemOOMEvent, "System OOM encountered")
+			ow.recorder.PastEventf(ref, util.Time{Time: event.Timestamp}, systemOOMEvent, "System OOM encountered")
 		}
 		glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
 	}()

@@ -108,7 +108,7 @@ func (kl *Kubelet) mountExternalVolumes(pod *api.Pod) (kubecontainer.VolumeMap,
 
 		// Try to use a plugin for this volume.
 		internal := volume.NewSpecFromVolume(volSpec)
-		builder, err := kl.newVolumeBuilderFromPlugins(internal, pod, volume.VolumeOptions{rootContext}, kl.mounter)
+		builder, err := kl.newVolumeBuilderFromPlugins(internal, pod, volume.VolumeOptions{RootContext: rootContext}, kl.mounter)
 		if err != nil {
 			glog.Errorf("Could not create volume builder for pod %s: %v", pod.UID, err)
 			return nil, err

@@ -466,7 +466,6 @@ func (p *Parser) parse() ([]Requirement, error) {
 			return nil, fmt.Errorf("found '%s', expected: identifier or 'end of string'", lit)
 		}
 	}
-	return requirements, nil
 }
 
 func (p *Parser) parseRequirement() (*Requirement, error) {
@@ -558,7 +557,6 @@ func (p *Parser) parseValues() (util.StringSet, error) {
 	default:
 		return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
 	}
-	return util.NewStringSet(), nil
 }
 
 // parseIdentifiersList parses a (possibly empty) list of

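The two deletions above are vet's unreachable-code finding: a trailing return after a construct from which control can never fall through. A hypothetical sketch of the same shape:

// Hypothetical illustration of the unreachable-code fix.
package example

func firstPositive(ch <-chan int) int {
	for {
		v, ok := <-ch
		if !ok {
			return 0
		}
		if v > 0 {
			return v
		}
	}
	// Anything placed here — e.g. `return -1` — is what vet reports as
	// unreachable, since the loop above never exits normally.
}
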
@@ -230,7 +230,7 @@ func TestLexerSequence(t *testing.T) {
 		}
 		for i := 0; i < min(len(tokens), len(v.t)); i++ {
 			if tokens[i] != v.t[i] {
-				t.Errorf("Test '%s': Mismatching in token type found '%s' it should be '%s'", v.s, tokens[i], v.t[i])
+				t.Errorf("Test '%s': Mismatching in token type found '%v' it should be '%v'", v.s, tokens[i], v.t[i])
 			}
 		}
 	}
@@ -453,7 +453,7 @@ func TestSetSelectorParser(t *testing.T) {
 		} else if err == nil && !ssp.Valid {
 			t.Errorf("Parse(%s) => %+v expected error", ssp.In, sel)
 		} else if ssp.Match && !reflect.DeepEqual(sel, ssp.Out) {
-			t.Errorf("Parse(%s) => parse output '%t' doesn't match '%t' expected match", ssp.In, sel, ssp.Out)
+			t.Errorf("Parse(%s) => parse output '%v' doesn't match '%v' expected match", ssp.In, sel, ssp.Out)
 		}
 	}
 }
@@ -496,7 +496,7 @@ func TestAdd(t *testing.T) {
 	for _, ts := range testCases {
 		ts.sel = ts.sel.Add(ts.key, ts.operator, ts.values)
 		if !reflect.DeepEqual(ts.sel, ts.refSelector) {
-			t.Errorf("Expected %t found %t", ts.refSelector, ts.sel)
+			t.Errorf("Expected %v found %v", ts.refSelector, ts.sel)
 		}
 	}
 }

@@ -376,7 +376,7 @@ func New(c *Config) *Master {
 	m.handlerContainer = handlerContainer
 	// Use CurlyRouter to be able to use regular expressions in paths. Regular expressions are required in paths for example for proxy (where the path is proxy/{kind}/{name}/{*})
 	m.handlerContainer.Router(restful.CurlyRouter{})
-	m.muxHelper = &apiserver.MuxHelper{m.mux, []string{}}
+	m.muxHelper = &apiserver.MuxHelper{Mux: m.mux, RegisteredPaths: []string{}}
 
 	m.init(c)
 
@@ -548,7 +548,7 @@ func (m *Master) init(c *Config) {
 			httpKubeletClient.Client.Transport = transport
 		}
 	} else {
-		glog.Errorf("Failed to cast %v to HTTPKubeletClient, skipping SSH tunnel.")
+		glog.Errorf("Failed to cast %v to HTTPKubeletClient, skipping SSH tunnel.", c.KubeletClient)
 	}
 	healthzChecks = append(healthzChecks, healthz.NamedCheck("SSH Tunnel Check", m.IsTunnelSyncHealthy))
 	m.lastSyncMetric = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
@@ -568,7 +568,7 @@ func (m *Master) init(c *Config) {
 	apiserver.InstallSupport(m.muxHelper, m.rootWebService, c.EnableProfiling, healthzChecks...)
 	apiserver.AddApiWebService(m.handlerContainer, c.APIPrefix, apiVersions)
 	defaultVersion := m.defaultAPIGroupVersion()
-	requestInfoResolver := &apiserver.APIRequestInfoResolver{util.NewStringSet(strings.TrimPrefix(defaultVersion.Root, "/")), defaultVersion.Mapper}
+	requestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: util.NewStringSet(strings.TrimPrefix(defaultVersion.Root, "/")), RestMapper: defaultVersion.Mapper}
 	apiserver.InstallServiceErrorHandler(m.handlerContainer, requestInfoResolver, apiVersions)
 
 	if m.exp {
@@ -577,7 +577,7 @@ func (m *Master) init(c *Config) {
 			glog.Fatalf("Unable to setup experimental api: %v", err)
 		}
 		apiserver.AddApiWebService(m.handlerContainer, c.ExpAPIPrefix, []string{expVersion.Version})
-		expRequestInfoResolver := &apiserver.APIRequestInfoResolver{util.NewStringSet(strings.TrimPrefix(expVersion.Root, "/")), expVersion.Mapper}
+		expRequestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: util.NewStringSet(strings.TrimPrefix(expVersion.Root, "/")), RestMapper: expVersion.Mapper}
 		apiserver.InstallServiceErrorHandler(m.handlerContainer, expRequestInfoResolver, []string{expVersion.Version})
 	}
 

@@ -128,13 +128,13 @@ func (s *endpointsStore) Merge(source string, change interface{}) error {
 	case ADD:
 		glog.V(4).Infof("Adding new endpoint from source %s : %+v", source, update.Endpoints)
 		for _, value := range update.Endpoints {
-			name := types.NamespacedName{value.Namespace, value.Name}
+			name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
 			endpoints[name] = value
 		}
 	case REMOVE:
 		glog.V(4).Infof("Removing an endpoint %+v", update)
 		for _, value := range update.Endpoints {
-			name := types.NamespacedName{value.Namespace, value.Name}
+			name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
 			delete(endpoints, name)
 		}
 	case SET:
@@ -142,7 +142,7 @@ func (s *endpointsStore) Merge(source string, change interface{}) error {
 		// Clear the old map entries by just creating a new map
 		endpoints = make(map[types.NamespacedName]api.Endpoints)
 		for _, value := range update.Endpoints {
-			name := types.NamespacedName{value.Namespace, value.Name}
+			name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
 			endpoints[name] = value
 		}
 	default:
@@ -226,13 +226,13 @@ func (s *serviceStore) Merge(source string, change interface{}) error {
 	case ADD:
 		glog.V(4).Infof("Adding new service from source %s : %+v", source, update.Services)
 		for _, value := range update.Services {
-			name := types.NamespacedName{value.Namespace, value.Name}
+			name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
 			services[name] = value
 		}
 	case REMOVE:
 		glog.V(4).Infof("Removing a service %+v", update)
 		for _, value := range update.Services {
-			name := types.NamespacedName{value.Namespace, value.Name}
+			name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
 			delete(services, name)
 		}
 	case SET:
@@ -240,7 +240,7 @@ func (s *serviceStore) Merge(source string, change interface{}) error {
 		// Clear the old map entries by just creating a new map
 		services = make(map[types.NamespacedName]api.Service)
 		for _, value := range update.Services {
-			name := types.NamespacedName{value.Namespace, value.Name}
+			name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
 			services[name] = value
 		}
 	default:

@@ -273,14 +273,13 @@ func (proxier *Proxier) OnServiceUpdate(services []api.Service) {
 
 		// if ClusterIP is "None" or empty, skip proxying
 		if !api.IsServiceIPSet(service) {
-			glog.V(3).Infof("Skipping service %s due to clusterIP = %q", types.NamespacedName{service.Namespace, service.Name}, service.Spec.ClusterIP)
+			glog.V(3).Infof("Skipping service %s due to clusterIP = %q", types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, service.Spec.ClusterIP)
 			continue
 		}
 
 		for i := range service.Spec.Ports {
 			servicePort := &service.Spec.Ports[i]
 
-			serviceName := proxy.ServicePortName{types.NamespacedName{service.Namespace, service.Name}, servicePort.Name}
+			serviceName := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: servicePort.Name}
 			activeServices[serviceName] = true
 			serviceIP := net.ParseIP(service.Spec.ClusterIP)
 			info, exists := proxier.getServiceInfo(serviceName)

@@ -212,7 +212,7 @@ func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) {
 
 func TestTCPProxy(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
@@ -239,7 +239,7 @@ func TestTCPProxy(t *testing.T) {
 
 func TestUDPProxy(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
@@ -266,8 +266,8 @@ func TestUDPProxy(t *testing.T) {
 
 func TestMultiPortProxy(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	serviceP := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo-p"}, "p"}
-	serviceQ := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo-q"}, "q"}
+	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
+	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
 	lb.OnEndpointsUpdate([]api.Endpoints{{
 		ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
 		Subsets: []api.EndpointSubset{{
@@ -305,9 +305,9 @@ func TestMultiPortProxy(t *testing.T) {
 
 func TestMultiPortOnServiceUpdate(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	serviceP := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
-	serviceQ := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "q"}
-	serviceX := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "x"}
+	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
+	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"}
+	serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}
 
 	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), &fakeIptables{}, net.ParseIP("127.0.0.1"), nil)
 	if err != nil {
@@ -361,7 +361,7 @@ func stopProxyByName(proxier *Proxier, service proxy.ServicePortName) error {
 
 func TestTCPProxyStop(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
@@ -399,7 +399,7 @@ func TestTCPProxyStop(t *testing.T) {
 
 func TestUDPProxyStop(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
@@ -437,7 +437,7 @@ func TestUDPProxyStop(t *testing.T) {
 
 func TestTCPProxyUpdateDelete(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
@@ -474,7 +474,7 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
 
 func TestUDPProxyUpdateDelete(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
@@ -511,7 +511,7 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
 
 func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
@@ -563,7 +563,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
 
 func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
@@ -615,7 +615,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
 
 func TestTCPProxyUpdatePort(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
@@ -663,7 +663,7 @@ func TestTCPProxyUpdatePort(t *testing.T) {
 
 func TestUDPProxyUpdatePort(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
@@ -708,7 +708,7 @@ func TestUDPProxyUpdatePort(t *testing.T) {
 
 func TestProxyUpdatePublicIPs(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
@@ -760,7 +760,7 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
 
 func TestProxyUpdatePortal(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "echo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsUpdate([]api.Endpoints{
 		{
 			ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},

@@ -251,7 +251,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
 		}
 
 		for portname := range portsToEndpoints {
-			svcPort := proxy.ServicePortName{types.NamespacedName{svcEndpoints.Namespace, svcEndpoints.Name}, portname}
+			svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: svcEndpoints.Namespace, Name: svcEndpoints.Name}, Port: portname}
 			state, exists := lb.services[svcPort]
 			curEndpoints := []string{}
 			if state != nil {

@@ -68,7 +68,7 @@ func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
 	loadBalancer := NewLoadBalancerRR()
 	var endpoints []api.Endpoints
 	loadBalancer.OnEndpointsUpdate(endpoints)
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, "does-not-exist"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
 	endpoint, err := loadBalancer.NextEndpoint(service, nil)
 	if err == nil {
 		t.Errorf("Didn't fail with non-existent service")
@@ -90,7 +90,7 @@ func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.Se
 
 func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
 	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
 	endpoint, err := loadBalancer.NextEndpoint(service, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -128,7 +128,7 @@ func stringsInSlice(haystack []string, needles ...string) bool {
 
 func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
 	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, "p"}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
 	endpoint, err := loadBalancer.NextEndpoint(service, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -155,8 +155,8 @@ func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
 
 func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
 	loadBalancer := NewLoadBalancerRR()
-	serviceP := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, "p"}
-	serviceQ := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, "q"}
+	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
+	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
 	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -198,8 +198,8 @@ func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
 
 func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
 	loadBalancer := NewLoadBalancerRR()
-	serviceP := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, "p"}
-	serviceQ := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, "q"}
+	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
+	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
 	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -289,8 +289,8 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
 
 func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
 	loadBalancer := NewLoadBalancerRR()
-	fooServiceP := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, "p"}
-	barServiceP := proxy.ServicePortName{types.NamespacedName{"testnamespace", "bar"}, "p"}
+	fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
+	barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
 	endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -345,7 +345,7 @@ func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
 
 func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
 	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, ""}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
 	endpoint, err := loadBalancer.NextEndpoint(service, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -402,7 +402,7 @@ func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
 
 func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
 	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, ""}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
 	endpoint, err := loadBalancer.NextEndpoint(service, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -464,7 +464,7 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
 	client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
 	client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
 	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, ""}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
 	endpoint, err := loadBalancer.NextEndpoint(service, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -538,7 +538,7 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
 	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
 	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
 	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, ""}
+	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
 	endpoint, err := loadBalancer.NextEndpoint(service, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -599,7 +599,7 @@ func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
 	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
 	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
 	loadBalancer := NewLoadBalancerRR()
-	fooService := proxy.ServicePortName{types.NamespacedName{"testnamespace", "foo"}, ""}
+	fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
 	endpoint, err := loadBalancer.NextEndpoint(fooService, nil)
 	if err == nil || len(endpoint) != 0 {
 		t.Errorf("Didn't fail with non-existent service")
@@ -615,7 +615,7 @@ func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
 			},
 		},
 	}
-	barService := proxy.ServicePortName{types.NamespacedName{"testnamespace", "bar"}, ""}
+	barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
 	loadBalancer.NewService(barService, api.ServiceAffinityClientIP, 0)
 	endpoints[1] = api.Endpoints{
 		ObjectMeta: api.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},

@@ -79,7 +79,7 @@ func (endpointsStrategy) AllowUnconditionalUpdate() bool {
 
 // MatchEndpoints returns a generic matcher for a given label and field selector.
 func MatchEndpoints(label labels.Selector, field fields.Selector) generic.Matcher {
-	return &generic.SelectionPredicate{label, field, EndpointsAttributes}
+	return &generic.SelectionPredicate{Label: label, Field: field, GetAttrs: EndpointsAttributes}
 }
 
 // EndpointsAttributes returns the attributes of an endpoint such that a

@@ -137,13 +137,13 @@ func (rs *REST) getAttrs(obj runtime.Object) (objLabels labels.Set, objFields fi
 }
 
 func (rs *REST) List(ctx api.Context, label labels.Selector, field fields.Selector) (runtime.Object, error) {
-	return rs.registry.ListPredicate(ctx, &generic.SelectionPredicate{label, field, rs.getAttrs})
+	return rs.registry.ListPredicate(ctx, &generic.SelectionPredicate{Label: label, Field: field, GetAttrs: rs.getAttrs})
 }
 
 // Watch returns Events events via a watch.Interface.
 // It implements rest.Watcher.
 func (rs *REST) Watch(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
-	return rs.registry.WatchPredicate(ctx, &generic.SelectionPredicate{label, field, rs.getAttrs}, resourceVersion)
+	return rs.registry.WatchPredicate(ctx, &generic.SelectionPredicate{Label: label, Field: field, GetAttrs: rs.getAttrs}, resourceVersion)
 }
 
 // New returns a new api.Event

@@ -69,7 +69,7 @@ func (limitrangeStrategy) AllowUnconditionalUpdate() bool {
 }
 
 func MatchLimitRange(label labels.Selector, field fields.Selector) generic.Matcher {
-	return &generic.SelectionPredicate{label, field, getAttrs}
+	return &generic.SelectionPredicate{Label: label, Field: field, GetAttrs: getAttrs}
 }
 
 func getAttrs(obj runtime.Object) (objLabels labels.Set, objFields fields.Set, err error) {

@@ -38,7 +38,7 @@ func TestMatchNode(t *testing.T) {
 		m := MatchNode(labels.Everything(), field.AsSelector())
 		_, matchesSingle := m.MatchesSingle()
 		if e, a := expectedResult, matchesSingle; e != a {
-			t.Errorf("%+v: expected %v, got %v", e, a)
+			t.Errorf("%+v: expected %v, got %v", fieldSet, e, a)
 		}
 	}
 }

@@ -124,7 +124,7 @@ func (c *Repair) RunOnce() error {
 			util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network))
 		case ipallocator.ErrFull:
 			// TODO: send event
-			return fmt.Errorf("the service CIDR %s is full; you must widen the CIDR in order to create new services")
+			return fmt.Errorf("the service CIDR %v is full; you must widen the CIDR in order to create new services", r)
 		default:
 			return fmt.Errorf("unable to allocate cluster IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err)
 		}

@@ -19,9 +19,10 @@ package portallocator
 import (
 	"testing"
 
+	"strconv"
+
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/util"
-	"strconv"
 )
 
 func TestAllocate(t *testing.T) {
@@ -42,10 +43,10 @@ func TestAllocate(t *testing.T) {
 		}
 		count++
 		if !pr.Contains(p) {
-			t.Fatalf("allocated %s which is outside of %s", p, pr)
+			t.Fatalf("allocated %d which is outside of %v", p, pr)
 		}
 		if found.Has(strconv.Itoa(p)) {
-			t.Fatalf("allocated %s twice @ %d", p, count)
+			t.Fatalf("allocated %d twice @ %d", p, count)
 		}
 		found.Insert(strconv.Itoa(p))
 	}
@@ -65,7 +66,7 @@ func TestAllocate(t *testing.T) {
 		t.Fatal(err)
 	}
 	if released != p {
-		t.Errorf("unexpected %s : %s", p, released)
+		t.Errorf("unexpected %d : %d", p, released)
 	}
 
 	if err := r.Release(released); err != nil {
@@ -139,7 +140,7 @@ func TestSnapshot(t *testing.T) {
 
 	for _, n := range ports {
 		if !other.Has(n) {
-			t.Errorf("restored range does not have %s", n)
+			t.Errorf("restored range does not have %d", n)
 		}
 	}
 	if other.Free() != r.Free() {

@@ -92,7 +92,7 @@ func (g *conversionGenerator) GenerateConversionsForType(version string, reflect
 	}
 	internalObjType := reflect.TypeOf(internalObj)
 	if internalObjType.Kind() != reflect.Ptr {
-		return fmt.Errorf("created object should be of type Ptr: ", internalObjType.Kind())
+		return fmt.Errorf("created object should be of type Ptr: %v", internalObjType.Kind())
 	}
 	inErr := g.generateConversionsBetween(reflection, internalObjType.Elem())
 	outErr := g.generateConversionsBetween(internalObjType.Elem(), reflection)
@@ -173,7 +173,6 @@ func (g *conversionGenerator) generateConversionsBetween(inType, outType reflect
 		// All simple types should be handled correctly with default conversion.
 		return nil
 	}
-	panic("This should never happen")
 }
 
 func isComplexType(reflection reflect.Type) bool {

@@ -153,7 +153,7 @@ func TestEmbeddedObject(t *testing.T) {
 	outer := &EmbeddedTest{
 		ID: "outer",
 		Object: runtime.EmbeddedObject{
-			&EmbeddedTest{
+			Object: &EmbeddedTest{
 				ID: "inner",
 			},
 		},
@@ -212,7 +212,7 @@ func TestDeepCopyOfEmbeddedObject(t *testing.T) {
 	original := &EmbeddedTest{
 		ID: "outer",
 		Object: runtime.EmbeddedObject{
-			&EmbeddedTest{
+			Object: &EmbeddedTest{
 				ID: "inner",
 			},
 		},

@@ -207,7 +207,7 @@ func TestExternalToInternalMapping(t *testing.T) {
 		encoded string
 	}{
 		{
-			&InternalOptionalExtensionType{Extension: runtime.EmbeddedObject{nil}},
+			&InternalOptionalExtensionType{Extension: runtime.EmbeddedObject{Object: nil}},
 			`{"kind":"OptionalExtensionType","apiVersion":"testExternal"}`,
 		},
 	}
@@ -245,13 +245,13 @@ func TestExtensionMapping(t *testing.T) {
 		encoded string
 	}{
 		{
-			&InternalExtensionType{Extension: runtime.EmbeddedObject{&ExtensionA{TestString: "foo"}}},
+			&InternalExtensionType{Extension: runtime.EmbeddedObject{Object: &ExtensionA{TestString: "foo"}}},
 			`{"kind":"ExtensionType","apiVersion":"testExternal","extension":{"kind":"A","testString":"foo"}}`,
 		}, {
-			&InternalExtensionType{Extension: runtime.EmbeddedObject{&ExtensionB{TestString: "bar"}}},
+			&InternalExtensionType{Extension: runtime.EmbeddedObject{Object: &ExtensionB{TestString: "bar"}}},
 			`{"kind":"ExtensionType","apiVersion":"testExternal","extension":{"kind":"B","testString":"bar"}}`,
 		}, {
-			&InternalExtensionType{Extension: runtime.EmbeddedObject{nil}},
+			&InternalExtensionType{Extension: runtime.EmbeddedObject{Object: nil}},
 			`{"kind":"ExtensionType","apiVersion":"testExternal","extension":null}`,
 		},
 	}

@@ -37,7 +37,7 @@ func (a APIObjectVersioner) UpdateObject(obj runtime.Object, expiration *time.Ti
 		return err
 	}
 	if expiration != nil {
-		objectMeta.DeletionTimestamp = &util.Time{*expiration}
+		objectMeta.DeletionTimestamp = &util.Time{Time: *expiration}
 	}
 	versionString := ""
 	if resourceVersion != 0 {

@@ -39,7 +39,7 @@ func TestObjectVersioner(t *testing.T) {
 	if obj.ResourceVersion != "5" || obj.DeletionTimestamp != nil {
 		t.Errorf("unexpected resource version: %#v", obj)
 	}
-	now := util.Time{time.Now()}
+	now := util.Time{Time: time.Now()}
 	obj = &TestResource{ObjectMeta: api.ObjectMeta{ResourceVersion: "a"}}
 	if err := v.UpdateObject(obj, &now.Time, 5); err != nil {
 		t.Fatalf("unexpected error: %v", err)

@@ -107,7 +107,7 @@ func NewEtcdClientStartServerIfNecessary(server string) (tools.EtcdClient, error
 
 type etcdHealth struct {
 	// Note this has to be public so the json library can modify it.
-	Health string `json:health`
+	Health string `json:"health"`
 }
 
 func EtcdHealthCheck(data []byte) error {

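The `json:health` fix above is vet's struct-tag check: tags must be canonical key:"value" pairs, and a malformed tag is silently ignored by encoding/json, so the field would marshal under its Go name instead of the intended key. A hypothetical sketch of the behavior:

// Hypothetical illustration of the struct-tag fix.
package main

import (
	"encoding/json"
	"fmt"
)

type etcdHealth struct {
	// With the malformed tag `json:health` (unquoted value), encoding/json
	// ignores the tag entirely and emits the key "Health"; vet flags the
	// bad syntax so the intended lowercase key is actually used.
	Health string `json:"health"`
}

func main() {
	b, _ := json.Marshal(etcdHealth{Health: "true"})
	fmt.Println(string(b)) // {"health":"true"}
}
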
@@ -181,8 +181,8 @@ func (w *etcdWatcher) translate() {
 		case err := <-w.etcdError:
 			if err != nil {
 				w.emit(watch.Event{
-					watch.Error,
-					&api.Status{
+					Type: watch.Error,
+					Object: &api.Status{
 						Status:  api.StatusFailure,
 						Message: err.Error(),
 					},

@@ -26,7 +26,7 @@ import (
 func ExpectValue(t *testing.T, atomicValue *AtomicValue, expectedValue interface{}) {
 	actualValue := atomicValue.Load()
 	if actualValue != expectedValue {
-		t.Error("Expected to find %v, found %v", expectedValue, actualValue)
+		t.Errorf("Expected to find %v, found %v", expectedValue, actualValue)
 	}
 	ch := make(chan interface{})
 	go func() {
@@ -35,7 +35,7 @@ func ExpectValue(t *testing.T, atomicValue *AtomicValue, expectedValue interface
 	select {
 	case actualValue = <-ch:
 		if actualValue != expectedValue {
-			t.Error("Expected to find %v, found %v", expectedValue, actualValue)
+			t.Errorf("Expected to find %v, found %v", expectedValue, actualValue)
 			return
 		}
 	case <-time.After(time.Second * 5):

@@ -142,7 +142,7 @@ func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connec
 	} else {
 		if obj, err := api.Scheme.Decode(responseErrorBytes); err == nil {
 			if status, ok := obj.(*api.Status); ok {
-				return nil, &apierrors.StatusError{*status}
+				return nil, &apierrors.StatusError{ErrStatus: *status}
 			}
 		}
 		responseError = string(responseErrorBytes)

@@ -39,9 +39,9 @@ func testEnsureChain(t *testing.T, protocol Protocol) {
 			// Success.
 			func() ([]byte, error) { return []byte{}, nil },
 			// Exists.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{1} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
 			// Failure.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{2} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} },
 		},
 	}
 	fexec := exec.FakeExec{
@@ -96,7 +96,7 @@ func TestFlushChain(t *testing.T) {
 			// Success.
 			func() ([]byte, error) { return []byte{}, nil },
 			// Failure.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{1} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
 		},
 	}
 	fexec := exec.FakeExec{
@@ -130,7 +130,7 @@ func TestDeleteChain(t *testing.T) {
 			// Success.
 			func() ([]byte, error) { return []byte{}, nil },
 			// Failure.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{1} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
 		},
 	}
 	fexec := exec.FakeExec{
@@ -197,7 +197,7 @@ func TestEnsureRuleNew(t *testing.T) {
 			// iptables version check
 			func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
 			// Status 1 on the first call.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{1} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
 			// Success on the second call.
 			func() ([]byte, error) { return []byte{}, nil },
 		},
@@ -233,7 +233,7 @@ func TestEnsureRuleErrorChecking(t *testing.T) {
 			// iptables version check
 			func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
 			// Status 2 on the first call.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{2} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} },
 		},
 	}
 	fexec := exec.FakeExec{
@@ -260,9 +260,9 @@ func TestEnsureRuleErrorCreating(t *testing.T) {
 			// iptables version check
 			func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
 			// Status 1 on the first call.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{1} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
 			// Status 1 on the second call.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{1} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
 		},
 	}
 	fexec := exec.FakeExec{
@@ -290,7 +290,7 @@ func TestDeleteRuleAlreadyExists(t *testing.T) {
 			// iptables version check
 			func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
 			// Status 1 on the first call.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{1} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
 		},
 	}
 	fexec := exec.FakeExec{
@@ -353,7 +353,7 @@ func TestDeleteRuleErrorChecking(t *testing.T) {
 			// iptables version check
 			func() ([]byte, error) { return []byte("iptables v1.9.22"), nil },
 			// Status 2 on the first call.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{2} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} },
 		},
 	}
 	fexec := exec.FakeExec{
@@ -382,7 +382,7 @@ func TestDeleteRuleErrorCreating(t *testing.T) {
 			// Success on the first call.
 			func() ([]byte, error) { return []byte{}, nil },
 			// Status 1 on the second call.
-			func() ([]byte, error) { return nil, &exec.FakeExitError{1} },
+			func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },
 		},
 	}
 	fexec := exec.FakeExec{

@@ -298,7 +298,7 @@ Loop:
 				params[i].Known = true
 				params[i].Value, err = strconv.Atoi(value[i])
 				if err != nil {
-					return fmt.Errorf("array index %s is not a number", params[i].Value)
+					return fmt.Errorf("array index %s is not a number", value[i])
 				}
 			}
 		} else {

@@ -148,7 +148,7 @@ func verifyNoError(t *testing.T, err error, name string) {
 
 func verifyError(t *testing.T, err error, name string) {
 	if err == nil {
-		t.Fatalf("Unexpected response on %q. Expected: <error> Actual: <no error>")
+		t.Fatalf("Unexpected response on %q. Expected: <error> Actual: <no error>", name)
 	}
 }
 

@@ -59,7 +59,7 @@ func TestSplitPort(t *testing.T) {
 			t.Errorf("%q: Wanted %q, got %q", item.in, e, a)
 		}
 		if e, a := item.valid, valid; e != a {
-			t.Errorf("%q: Wanted %q, got %q", item.in, e, a)
+			t.Errorf("%q: Wanted %t, got %t", item.in, e, a)
 		}
 	}
 }

@@ -158,14 +158,14 @@ func TestIsValidPortName(t *testing.T) {
 	goodValues := []string{"telnet", "re-mail-ck", "pop3", "a", "a-1", "1-a", "a-1-b-2-c", "1-a-2-b-3"}
 	for _, val := range goodValues {
 		if !IsValidPortName(val) {
-			t.Errorf("expected true for '%d'", val)
+			t.Errorf("expected true for %q", val)
 		}
 	}
 
 	badValues := []string{"longerthan15characters", "", "12345", "1-2-3-4", "-begin", "end-", "two--hyphens", "1-2", "whois++"}
 	for _, val := range badValues {
 		if IsValidPortName(val) {
-			t.Errorf("expected false for '%d'", val)
+			t.Errorf("expected false for %q", val)
 		}
 	}
 }

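The two hunks above fix a subtler printf finding: a verb whose type does not match its operand (%q on a bool, '%d' on a string). vet type-checks each verb against its argument. A hypothetical sketch:

// Hypothetical illustration of verb/operand type mismatches.
package main

import "fmt"

func main() {
	valid := true
	name := "telnet"
	// go vet: %q wants a string (or rune) and %d wants an integer, so the
	// commented line is flagged; the corrected verbs match the operands.
	// fmt.Printf("valid=%q name='%d'\n", valid, name)
	fmt.Printf("valid=%t name=%q\n", valid, name)
}
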
@@ -188,7 +188,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
 		pod,
 		&mounter,
 		&mountDetector,
-		volume.VolumeOptions{config.rootContext},
+		volume.VolumeOptions{RootContext: config.rootContext},
 		fakeChconRnr)
 	if err != nil {
 		t.Errorf("Failed to make a new Builder: %v", err)
@@ -288,7 +288,7 @@ func TestPluginBackCompat(t *testing.T) {
 		Name: "vol1",
 	}
 	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{""}, nil)
+	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""}, nil)
 	if err != nil {
 		t.Errorf("Failed to make a new Builder: %v", err)
 	}

@@ -120,7 +120,7 @@ func TestPlugin(t *testing.T) {
 		},
 	}
 	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{""}, mount.New())
+	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""}, mount.New())
 	if err != nil {
 		t.Errorf("Failed to make a new Builder: %v", err)
 	}

@@ -102,7 +102,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 		t.Errorf("Failed to make a new Builder: %v", err)
 	}
 	if builder == nil {
-		t.Errorf("Got a nil Builder: %v")
+		t.Error("Got a nil Builder")
 	}
 	path := builder.GetPath()
 	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~glusterfs/vol1" {
@@ -123,7 +123,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 		t.Errorf("Failed to make a new Cleaner: %v", err)
 	}
 	if cleaner == nil {
-		t.Errorf("Got a nil Cleaner: %v")
+		t.Error("Got a nil Cleaner")
 	}
 	if err := cleaner.TearDown(); err != nil {
 		t.Errorf("Expected success, got: %v", err)
@@ -138,7 +138,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 func TestPluginVolume(t *testing.T) {
 	vol := &api.Volume{
 		Name: "vol1",
-		VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"ep", "vol", false}},
+		VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}},
 	}
 	doTestPlugin(t, volume.NewSpecFromVolume(vol))
 }
@@ -150,7 +150,7 @@ func TestPluginPersistentVolume(t *testing.T) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeSource: api.PersistentVolumeSource{
-				Glusterfs: &api.GlusterfsVolumeSource{"ep", "vol", false},
+				Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
 			},
 		},
 	}
@@ -165,7 +165,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeSource: api.PersistentVolumeSource{
-				Glusterfs: &api.GlusterfsVolumeSource{"ep", "vol", false},
+				Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
 			},
 			ClaimRef: &api.ObjectReference{
 				Name: "claimA",

@@ -180,7 +180,7 @@ func (r *hostPathRecycler) Recycle() error {
 			{
 				Name: "vol",
 				VolumeSource: api.VolumeSource{
-					HostPath: &api.HostPathVolumeSource{r.path},
+					HostPath: &api.HostPathVolumeSource{Path: r.path},
 				},
 			},
 		},

@@ -72,7 +72,7 @@ func TestRecycler(t *testing.T) {
 	}
 	recycler, err := plug.NewRecycler(spec)
 	if err != nil {
-		t.Error("Failed to make a new Recyler: %v", err)
+		t.Errorf("Failed to make a new Recyler: %v", err)
 	}
 	if recycler.GetPath() != spec.PersistentVolumeSource.HostPath.Path {
 		t.Errorf("Expected %s but got %s", spec.PersistentVolumeSource.HostPath.Path, recycler.GetPath())
@@ -112,7 +112,7 @@ func TestPlugin(t *testing.T) {
 	}
 	spec := &api.Volume{
 		Name: "vol1",
-		VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{"/vol1"}},
+		VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/vol1"}},
 	}
 	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
 	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{}, nil)
@@ -152,7 +152,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeSource: api.PersistentVolumeSource{
-				HostPath: &api.HostPathVolumeSource{"foo"},
+				HostPath: &api.HostPathVolumeSource{Path: "foo"},
 			},
 			ClaimRef: &api.ObjectReference{
 				Name: "claimA",

@@ -113,7 +113,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 		t.Errorf("Failed to make a new Builder: %v", err)
 	}
 	if builder == nil {
-		t.Errorf("Got a nil Builder: %v")
+		t.Error("Got a nil Builder")
 	}
 
 	path := builder.GetPath()
@@ -148,7 +148,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 		t.Errorf("Failed to make a new Cleaner: %v", err)
 	}
 	if cleaner == nil {
-		t.Errorf("Got a nil Cleaner: %v")
+		t.Error("Got a nil Cleaner")
 	}
 
 	if err := cleaner.TearDown(); err != nil {