diff --git a/cluster/addons/dns/kube2sky/kube2sky.go b/cluster/addons/dns/kube2sky/kube2sky.go index 7d423b12db..0251d10b7f 100644 --- a/cluster/addons/dns/kube2sky/kube2sky.go +++ b/cluster/addons/dns/kube2sky/kube2sky.go @@ -192,7 +192,7 @@ func (ks *kube2sky) generateRecordsForHeadlessService(subdomain string, e *kapi. endpointPort := &e.Subsets[idx].Ports[portIdx] portSegment := buildPortSegmentString(endpointPort.Name, endpointPort.Protocol) if portSegment != "" { - err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, endpointPort.Port) + err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, int(endpointPort.Port)) if err != nil { return err } @@ -343,7 +343,7 @@ func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *k port := &service.Spec.Ports[i] portSegment := buildPortSegmentString(port.Name, port.Protocol) if portSegment != "" { - err = ks.generateSRVRecord(subdomain, portSegment, recordLabel, subdomain, port.Port) + err = ks.generateSRVRecord(subdomain, portSegment, recordLabel, subdomain, int(port.Port)) if err != nil { return err } diff --git a/cluster/addons/dns/kube2sky/kube2sky_test.go b/cluster/addons/dns/kube2sky/kube2sky_test.go index 1a410742aa..8686027b69 100644 --- a/cluster/addons/dns/kube2sky/kube2sky_test.go +++ b/cluster/addons/dns/kube2sky/kube2sky_test.go @@ -111,7 +111,7 @@ type hostPort struct { func getHostPort(service *kapi.Service) *hostPort { return &hostPort{ Host: service.Spec.ClusterIP, - Port: service.Spec.Ports[0].Port, + Port: int(service.Spec.Ports[0].Port), } } @@ -181,7 +181,7 @@ func newService(namespace, serviceName, clusterIP, portName string, portNumber i Spec: kapi.ServiceSpec{ ClusterIP: clusterIP, Ports: []kapi.ServicePort{ - {Port: portNumber, Name: portName, Protocol: "TCP"}, + {Port: int32(portNumber), Name: portName, Protocol: "TCP"}, }, }, } @@ -212,7 +212,7 @@ func newSubset() kapi.EndpointSubset { func newSubsetWithOnePort(portName string, port int, ips ...string) kapi.EndpointSubset { subset := newSubset() - subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: port, Name: portName, Protocol: "TCP"}) + subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: int32(port), Name: portName, Protocol: "TCP"}) for _, ip := range ips { subset.Addresses = append(subset.Addresses, kapi.EndpointAddress{IP: ip}) } @@ -221,7 +221,7 @@ func newSubsetWithOnePort(portName string, port int, ips ...string) kapi.Endpoin func newSubsetWithTwoPorts(portName1 string, portNumber1 int, portName2 string, portNumber2 int, ips ...string) kapi.EndpointSubset { subset := newSubsetWithOnePort(portName1, portNumber1, ips...) 
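The kube2sky hunks above all follow one pattern: kapi.ServicePort.Port and kapi.EndpointPort.Port are now int32, and Go performs no implicit integer conversions, so call sites that still work with plain int (SRV record generation, test fixtures) convert explicitly at the boundary. A minimal, self-contained sketch of that pattern, using hypothetical stand-in types rather than the real kapi structs:

package main

import "fmt"

// endpointPort stands in for kapi.EndpointPort, whose Port field is now int32.
type endpointPort struct {
	Name string
	Port int32
}

// newSubsetPort mirrors the test helpers: callers pass an int and the
// helper converts once, where the struct literal is built.
func newSubsetPort(name string, port int) endpointPort {
	return endpointPort{Name: name, Port: int32(port)}
}

// generateSRV mirrors the generateSRVRecord call sites: APIs that still
// take an int receive an explicit int(...) cast.
func generateSRV(label string, port int) string {
	return fmt.Sprintf("%s:%d", label, port)
}

func main() {
	p := newSubsetPort("dns", 53)
	fmt.Println(generateSRV(p.Name, int(p.Port)))
}
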
- subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: portNumber2, Name: portName2, Protocol: "TCP"}) + subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: int32(portNumber2), Name: portName2, Protocol: "TCP"}) return subset } diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index abc2e0e5fd..f974e5c5b1 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -126,7 +126,7 @@ func Run(s *options.CMServer) error { kubeconfig.ContentConfig.ContentType = s.ContentType // Override kubeconfig qps/burst settings from flags kubeconfig.QPS = s.KubeAPIQPS - kubeconfig.Burst = s.KubeAPIBurst + kubeconfig.Burst = int(s.KubeAPIBurst) kubeClient, err := client.New(kubeconfig) if err != nil { @@ -144,7 +144,7 @@ func Run(s *options.CMServer) error { mux.Handle("/metrics", prometheus.Handler()) server := &http.Server{ - Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)), + Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))), Handler: mux, } glog.Fatal(server.ListenAndServe()) @@ -198,7 +198,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig informers[reflect.TypeOf(&api.Pod{})] = podInformer go endpointcontroller.NewEndpointController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller"))). - Run(s.ConcurrentEndpointSyncs, wait.NeverStop) + Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) go replicationcontroller.NewReplicationManager( @@ -206,12 +206,12 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")), ResyncPeriod(s), replicationcontroller.BurstReplicas, - s.LookupCacheSizeForRC, - ).Run(s.ConcurrentRCSyncs, wait.NeverStop) + int(s.LookupCacheSizeForRC), + ).Run(int(s.ConcurrentRCSyncs), wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) if s.TerminatedPodGCThreshold > 0 { - go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.TerminatedPodGCThreshold). + go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), int(s.TerminatedPodGCThreshold)). 
Run(wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) } @@ -224,8 +224,8 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig // this cidr has been validated already _, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR) nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")), - s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst), - flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst), + s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)), + flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)), s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs) nodeController.Run(s.NodeSyncPeriod.Duration) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) @@ -268,7 +268,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig ReplenishmentResyncPeriod: ResyncPeriod(s), GroupKindsToReplenish: groupKindsToReplenish, } - go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop) + go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) // If apiserver is not running we should wait for some time and fail only then. This is particularly @@ -299,7 +299,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig glog.Fatalf("Failed to get supported resources from server: %v", err) } namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes) - go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop) + go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) groupVersion := "extensions/v1beta1" @@ -324,29 +324,29 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig if containsResource(resources, "daemonsets") { glog.Infof("Starting daemon set controller") - go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), s.LookupCacheSizeForDaemonSet). - Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop) + go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), int(s.LookupCacheSizeForDaemonSet)). + Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) } if containsResource(resources, "jobs") { glog.Infof("Starting job controller") go job.NewJobController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller"))). 
- Run(s.ConcurrentJobSyncs, wait.NeverStop) + Run(int(s.ConcurrentJobSyncs), wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) } if containsResource(resources, "deployments") { glog.Infof("Starting deployment controller") go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)). - Run(s.ConcurrentDeploymentSyncs, wait.NeverStop) + Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) } if containsResource(resources, "replicasets") { glog.Infof("Starting ReplicaSet controller") - go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, s.LookupCacheSizeForRS). - Run(s.ConcurrentRSSyncs, wait.NeverStop) + go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)). + Run(int(s.ConcurrentRSSyncs), wait.NeverStop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) } } @@ -364,7 +364,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler( clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")), s.PVClaimBinderSyncPeriod.Duration, - s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry, + int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry), ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, ) diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index 7204deba36..180120cd47 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -92,19 +92,19 @@ func NewCMServer() *CMServer { // AddFlags adds flags for a specific CMServer to the specified FlagSet func (s *CMServer) AddFlags(fs *pflag.FlagSet) { - fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on") + fs.Int32Var(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on") fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)") fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.") fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") - fs.IntVar(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load") - fs.IntVar(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load") - fs.IntVar(&s.ConcurrentRSSyncs, "concurrent-replicaset-syncs", s.ConcurrentRSSyncs, "The number of replica sets that are allowed to sync concurrently. 
Larger number = more responsive replica management, but more CPU (and network) load") - fs.IntVar(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load") - fs.IntVar(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load") - fs.IntVar(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load") - fs.IntVar(&s.LookupCacheSizeForRC, "replication-controller-lookup-cache-size", s.LookupCacheSizeForRC, "The the size of lookup cache for replication controllers. Larger number = more responsive replica management, but more MEM load.") - fs.IntVar(&s.LookupCacheSizeForRS, "replicaset-lookup-cache-size", s.LookupCacheSizeForRS, "The the size of lookup cache for replicatsets. Larger number = more responsive replica management, but more MEM load.") - fs.IntVar(&s.LookupCacheSizeForDaemonSet, "daemonset-lookup-cache-size", s.LookupCacheSizeForDaemonSet, "The the size of lookup cache for daemonsets. Larger number = more responsive daemonsets, but more MEM load.") + fs.Int32Var(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load") + fs.Int32Var(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load") + fs.Int32Var(&s.ConcurrentRSSyncs, "concurrent-replicaset-syncs", s.ConcurrentRSSyncs, "The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load") + fs.Int32Var(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load") + fs.Int32Var(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load") + fs.Int32Var(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load") + fs.Int32Var(&s.LookupCacheSizeForRC, "replication-controller-lookup-cache-size", s.LookupCacheSizeForRC, "The the size of lookup cache for replication controllers. Larger number = more responsive replica management, but more MEM load.") + fs.Int32Var(&s.LookupCacheSizeForRS, "replicaset-lookup-cache-size", s.LookupCacheSizeForRS, "The the size of lookup cache for replicatsets. 
Larger number = more responsive replica management, but more MEM load.") + fs.Int32Var(&s.LookupCacheSizeForDaemonSet, "daemonset-lookup-cache-size", s.LookupCacheSizeForDaemonSet, "The the size of lookup cache for daemonsets. Larger number = more responsive daemonsets, but more MEM load.") fs.DurationVar(&s.ServiceSyncPeriod.Duration, "service-sync-period", s.ServiceSyncPeriod.Duration, "The period for syncing services with their external load balancers") fs.DurationVar(&s.NodeSyncPeriod.Duration, "node-sync-period", s.NodeSyncPeriod.Duration, ""+ "The period for syncing nodes from cloudprovider. Longer periods will result in "+ @@ -114,19 +114,19 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) { fs.DurationVar(&s.PVClaimBinderSyncPeriod.Duration, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod.Duration, "The period for syncing persistent volumes and persistent volume claims") fs.DurationVar(&s.MinResyncPeriod.Duration, "min-resync-period", s.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod") fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling") - fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod") - fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod") + fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod") + fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod") fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.") - fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. 
This is for development and testing only and will not work in a multi-node cluster.") - fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.") + fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.") + fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.") fs.BoolVar(&s.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.") - fs.IntVar(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.") + fs.Int32Var(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.") fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.") fs.DurationVar(&s.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.") fs.DurationVar(&s.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.") fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.") - fs.IntVar(&s.DeletingPodsBurst, "deleting-pods-burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.") - fs.IntVar(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+ + fs.Int32Var(&s.DeletingPodsBurst, "deleting-pods-burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. 
For more details look into RateLimiter.") + fs.Int32Var(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+ "The number of retries for initial node registration. Retry interval equals node-sync-period.") fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.") fs.DurationVar(&s.NodeMonitorGracePeriod.Duration, "node-monitor-grace-period", s.NodeMonitorGracePeriod.Duration, @@ -147,7 +147,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.RootCAFile, "root-ca-file", s.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.") fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.") fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver") - fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") + fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.") leaderelection.BindFlags(&s.LeaderElection, fs) } diff --git a/cmd/kube-controller-manager/app/plugins.go b/cmd/kube-controller-manager/app/plugins.go index e6e16699bd..33651191b4 100644 --- a/cmd/kube-controller-manager/app/plugins.go +++ b/cmd/kube-controller-manager/app/plugins.go @@ -57,8 +57,8 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) [] // HostPath recycling is for testing and development purposes only! hostPathConfig := volume.VolumeConfig{ - RecyclerMinimumTimeout: config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, - RecyclerTimeoutIncrement: config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, + RecyclerMinimumTimeout: int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath), + RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath), RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(), } if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil { @@ -67,8 +67,8 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) [] allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...) 
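The flag plumbing in options.go above is mechanical: once a componentconfig field such as ConcurrentRCSyncs or KubeAPIBurst becomes int32, the registration has to switch from fs.IntVar to fs.Int32Var, because pflag binds by pointer type. A small sketch of the same binding against a toy config struct (the struct and flag names here are illustrative, not the real CMServer):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// toyConfig imitates the shape of the component configs above: the
// tunables are int32 now, so the flag set must bind them as int32.
type toyConfig struct {
	ConcurrentSyncs int32
	KubeAPIBurst    int32
}

func main() {
	cfg := toyConfig{ConcurrentSyncs: 5, KubeAPIBurst: 10}

	fs := pflag.NewFlagSet("toy", pflag.ExitOnError)
	// Int32Var takes a *int32; with IntVar these lines would no longer compile.
	fs.Int32Var(&cfg.ConcurrentSyncs, "concurrent-syncs", cfg.ConcurrentSyncs, "number of concurrent sync workers")
	fs.Int32Var(&cfg.KubeAPIBurst, "kube-api-burst", cfg.KubeAPIBurst, "burst to use while talking with the apiserver")

	// Errors are ignored here only to keep the sketch short.
	fs.Parse([]string{"--concurrent-syncs=20"})

	// Callers that still expect an int (rate limiters, worker counts)
	// convert at the call site, exactly as the hunks above do.
	fmt.Println(int(cfg.ConcurrentSyncs), int(cfg.KubeAPIBurst))
}
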
nfsConfig := volume.VolumeConfig{ - RecyclerMinimumTimeout: config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, - RecyclerTimeoutIncrement: config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, + RecyclerMinimumTimeout: int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS), + RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS), RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(), } if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil { diff --git a/cmd/kube-proxy/app/options/options.go b/cmd/kube-proxy/app/options/options.go index 388ccb859f..9f342829da 100644 --- a/cmd/kube-proxy/app/options/options.go +++ b/cmd/kube-proxy/app/options/options.go @@ -40,7 +40,7 @@ type ProxyServerConfig struct { ResourceContainer string ContentType string KubeAPIQPS float32 - KubeAPIBurst int + KubeAPIBurst int32 ConfigSyncPeriod time.Duration CleanupAndExit bool NodeRef *api.ObjectReference @@ -63,16 +63,16 @@ func NewProxyConfig() *ProxyServerConfig { func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) { fs.Var(componentconfig.IPVar{Val: &s.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)") fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") - fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.") + fs.Int32Var(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.") fs.Var(componentconfig.IPVar{Val: &s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)") - fs.IntVar(s.OOMScoreAdj, "oom-score-adj", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]") + fs.Int32Var(s.OOMScoreAdj, "oom-score-adj", util.Int32PtrDerefOr(s.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]") fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).") fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.") fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).") fs.Var(componentconfig.PortRangeVar{Val: &s.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.") fs.StringVar(&s.HostnameOverride, "hostname-override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.") fs.Var(&s.Mode, "proxy-mode", "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '"+ExperimentalProxyModeAnnotation+"' annotation if provided. Otherwise use the best-available proxy (currently iptables). 
If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.") - fs.IntVar(s.IPTablesMasqueradeBit, "iptables-masquerade-bit", util.IntPtrDerefOr(s.IPTablesMasqueradeBit, 14), "If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].") + fs.Int32Var(s.IPTablesMasqueradeBit, "iptables-masquerade-bit", util.Int32PtrDerefOr(s.IPTablesMasqueradeBit, 14), "If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].") fs.DurationVar(&s.IPTablesSyncPeriod.Duration, "iptables-sync-period", s.IPTablesSyncPeriod.Duration, "How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.") fs.DurationVar(&s.ConfigSyncPeriod, "config-sync-period", s.ConfigSyncPeriod, "How often configuration from the apiserver is refreshed. Must be greater than 0.") fs.BoolVar(&s.MasqueradeAll, "masquerade-all", s.MasqueradeAll, "If using the pure iptables proxy, SNAT everything") @@ -80,8 +80,8 @@ func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&s.CleanupAndExit, "cleanup-iptables", s.CleanupAndExit, "If true cleanup iptables rules and exit.") fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.") fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver") - fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") + fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. 
Only applicable for proxy-mode=userspace") - fs.IntVar(&s.ConntrackMax, "conntrack-max", s.ConntrackMax, "Maximum number of NAT connections to track (0 to leave as-is)") + fs.Int32Var(&s.ConntrackMax, "conntrack-max", s.ConntrackMax, "Maximum number of NAT connections to track (0 to leave as-is)") fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)") } diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 256c5764bf..f0d71ef232 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -150,7 +150,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err var oomAdjuster *oom.OOMAdjuster if config.OOMScoreAdj != nil { oomAdjuster = oom.NewOOMAdjuster() - if err := oomAdjuster.ApplyOOMScoreAdj(0, *config.OOMScoreAdj); err != nil { + if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*config.OOMScoreAdj)); err != nil { glog.V(2).Info(err) } } @@ -181,7 +181,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err kubeconfig.ContentType = config.ContentType // Override kubeconfig qps/burst settings from flags kubeconfig.QPS = config.KubeAPIQPS - kubeconfig.Burst = config.KubeAPIBurst + kubeconfig.Burst = int(config.KubeAPIBurst) client, err := kubeclient.New(kubeconfig) if err != nil { @@ -204,7 +204,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err return nil, fmt.Errorf("Unable to read IPTablesMasqueradeBit from config") } - proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll, *config.IPTablesMasqueradeBit, config.ClusterCIDR) + proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll, int(*config.IPTablesMasqueradeBit), config.ClusterCIDR) if err != nil { glog.Fatalf("Unable to create proxier: %v", err) } @@ -289,7 +289,7 @@ func (s *ProxyServer) Run() error { }) configz.InstallHandler(http.DefaultServeMux) go wait.Until(func() { - err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(s.Config.HealthzPort), nil) + err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(int(s.Config.HealthzPort)), nil) if err != nil { glog.Errorf("Starting health server failed: %v", err) } @@ -299,7 +299,7 @@ func (s *ProxyServer) Run() error { // Tune conntrack, if requested if s.Conntracker != nil { if s.Config.ConntrackMax > 0 { - if err := s.Conntracker.SetMax(s.Config.ConntrackMax); err != nil { + if err := s.Conntracker.SetMax(int(s.Config.ConntrackMax)); err != nil { return err } } diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 4ac0b83e06..102b280287 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -117,7 +117,7 @@ func NewKubeletServer() *KubeletServer { VolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", NodeStatusUpdateFrequency: unversioned.Duration{Duration: 10 * time.Second}, NodeLabels: make(map[string]string), - OOMScoreAdj: qos.KubeletOOMScoreAdj, + OOMScoreAdj: int32(qos.KubeletOOMScoreAdj), LockFilePath: "", PodInfraContainerImage: GetDefaultPodInfraContainerImage(), Port: ports.KubeletPort, @@ -176,21 +176,21 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.HostPIDSources, "host-pid-sources", s.HostPIDSources, 
"Comma-separated list of sources from which the Kubelet allows pods to use the host pid namespace. [default=\"*\"]") fs.StringVar(&s.HostIPCSources, "host-ipc-sources", s.HostIPCSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host ipc namespace. [default=\"*\"]") fs.Float64Var(&s.RegistryPullQPS, "registry-qps", s.RegistryPullQPS, "If > 0, limit registry pull QPS to this value. If 0, unlimited. [default=5.0]") - fs.IntVar(&s.RegistryBurst, "registry-burst", s.RegistryBurst, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0") + fs.Int32Var(&s.RegistryBurst, "registry-burst", s.RegistryBurst, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0") fs.Float32Var(&s.EventRecordQPS, "event-qps", s.EventRecordQPS, "If > 0, limit event creations per second to this value. If 0, unlimited.") - fs.IntVar(&s.EventBurst, "event-burst", s.EventBurst, "Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0") + fs.Int32Var(&s.EventBurst, "event-burst", s.EventBurst, "Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0") fs.BoolVar(&s.RunOnce, "runonce", s.RunOnce, "If true, exit after spawning pods from local manifests or remote urls. Exclusive with --api-servers, and --enable-server") fs.BoolVar(&s.EnableDebuggingHandlers, "enable-debugging-handlers", s.EnableDebuggingHandlers, "Enables server endpoints for log collection and local running of containers and commands") fs.DurationVar(&s.MinimumGCAge.Duration, "minimum-container-ttl-duration", s.MinimumGCAge.Duration, "Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'") - fs.IntVar(&s.MaxPerPodContainerCount, "maximum-dead-containers-per-container", s.MaxPerPodContainerCount, "Maximum number of old instances to retain per container. Each container takes up some disk space. Default: 2.") - fs.IntVar(&s.MaxContainerCount, "maximum-dead-containers", s.MaxContainerCount, "Maximum number of old instances of containers to retain globally. Each container takes up some disk space. Default: 100.") + fs.Int32Var(&s.MaxPerPodContainerCount, "maximum-dead-containers-per-container", s.MaxPerPodContainerCount, "Maximum number of old instances to retain per container. Each container takes up some disk space. Default: 2.") + fs.Int32Var(&s.MaxContainerCount, "maximum-dead-containers", s.MaxContainerCount, "Maximum number of old instances of containers to retain globally. Each container takes up some disk space. 
Default: 100.") fs.Var(&s.AuthPath, "auth-path", "Path to .kubernetes_auth file, specifying how to authenticate to API server.") fs.MarkDeprecated("auth-path", "will be removed in a future version") fs.Var(&s.KubeConfig, "kubeconfig", "Path to a kubeconfig file, specifying how to authenticate to API server (the master location is set by the api-servers flag).") fs.UintVar(&s.CAdvisorPort, "cadvisor-port", s.CAdvisorPort, "The port of the localhost cAdvisor endpoint") - fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port of the localhost healthz endpoint") + fs.Int32Var(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port of the localhost healthz endpoint") fs.Var(componentconfig.IPVar{Val: &s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)") - fs.IntVar(&s.OOMScoreAdj, "oom-score-adj", s.OOMScoreAdj, "The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]") + fs.Int32Var(&s.OOMScoreAdj, "oom-score-adj", s.OOMScoreAdj, "The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]") fs.StringSliceVar(&s.APIServerList, "api-servers", []string{}, "List of Kubernetes API servers for publishing events, and reading pods and services. (ip:port), comma separated.") fs.BoolVar(&s.RegisterNode, "register-node", s.RegisterNode, "Register the node with the apiserver (defaults to true if --api-servers is set)") fs.StringVar(&s.ClusterDomain, "cluster-domain", s.ClusterDomain, "Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains") @@ -201,9 +201,9 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { bindableNodeLabels := config.ConfigurationMap(s.NodeLabels) fs.Var(&bindableNodeLabels, "node-labels", " Labels to add when registering the node in the cluster. Labels must be key=value pairs separated by ','.") fs.DurationVar(&s.ImageMinimumGCAge.Duration, "minimum-image-ttl-duration", s.ImageMinimumGCAge.Duration, "Minimum age for a unused image before it is garbage collected. Examples: '300ms', '10s' or '2h45m'. Default: '2m'") - fs.IntVar(&s.ImageGCHighThresholdPercent, "image-gc-high-threshold", s.ImageGCHighThresholdPercent, "The percent of disk usage after which image garbage collection is always run. Default: 90%") - fs.IntVar(&s.ImageGCLowThresholdPercent, "image-gc-low-threshold", s.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Default: 80%") - fs.IntVar(&s.LowDiskSpaceThresholdMB, "low-diskspace-threshold-mb", s.LowDiskSpaceThresholdMB, "The absolute free disk space, in MB, to maintain. When disk space falls below this threshold, new pods would be rejected. Default: 256") + fs.Int32Var(&s.ImageGCHighThresholdPercent, "image-gc-high-threshold", s.ImageGCHighThresholdPercent, "The percent of disk usage after which image garbage collection is always run. Default: 90%") + fs.Int32Var(&s.ImageGCLowThresholdPercent, "image-gc-low-threshold", s.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Default: 80%") + fs.Int32Var(&s.LowDiskSpaceThresholdMB, "low-diskspace-threshold-mb", s.LowDiskSpaceThresholdMB, "The absolute free disk space, in MB, to maintain. When disk space falls below this threshold, new pods would be rejected. 
Default: 256") fs.DurationVar(&s.VolumeStatsAggPeriod.Duration, "volume-stats-agg-period", s.VolumeStatsAggPeriod.Duration, "Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to 0. Default: '1m'") fs.StringVar(&s.NetworkPluginName, "network-plugin", s.NetworkPluginName, " The name of the network plugin to be invoked for various events in kubelet/pod lifecycle") fs.StringVar(&s.NetworkPluginDir, "network-plugin-dir", s.NetworkPluginDir, " The full path of the directory in which to search for network plugins") @@ -230,7 +230,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.HairpinMode, "hairpin-mode", s.HairpinMode, "How should the kubelet setup hairpin NAT. This allows endpoints of a Service to loadbalance back to themselves if they should try to access their own Service. Valid values are \"promiscuous-bridge\", \"hairpin-veth\" and \"none\".") fs.BoolVar(&s.BabysitDaemons, "babysit-daemons", s.BabysitDaemons, "If true, the node has babysitter process monitoring docker and kubelet.") fs.MarkDeprecated("babysit-daemons", "Will be removed in a future version.") - fs.IntVar(&s.MaxPods, "max-pods", s.MaxPods, "Number of Pods that can run on this Kubelet.") + fs.Int32Var(&s.MaxPods, "max-pods", s.MaxPods, "Number of Pods that can run on this Kubelet.") fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.") fs.StringVar(&s.NonMasqueradeCIDR, "non-masquerade-cidr", s.NonMasqueradeCIDR, "Traffic to IPs outside this range will use IP masquerade.") fs.StringVar(&s.PodCIDR, "pod-cidr", "", "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.") @@ -247,7 +247,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&s.RegisterSchedulable, "register-schedulable", s.RegisterSchedulable, "Register the node as schedulable. No-op if register-node is false. [default=true]") fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.") fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver") - fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") + fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") fs.BoolVar(&s.SerializeImagePulls, "serialize-image-pulls", s.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]") fs.BoolVar(&s.ExperimentalFlannelOverlay, "experimental-flannel-overlay", s.ExperimentalFlannelOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]") fs.DurationVar(&s.OutOfDiskTransitionFrequency.Duration, "outofdisk-transition-frequency", s.OutOfDiskTransitionFrequency.Duration, "Duration for which the kubelet has to wait before transitioning out of out-of-disk node condition status. 
Default: 5m0s") diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 8f45875b93..187d873014 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -160,13 +160,13 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) { imageGCPolicy := kubelet.ImageGCPolicy{ MinAge: s.ImageMinimumGCAge.Duration, - HighThresholdPercent: s.ImageGCHighThresholdPercent, - LowThresholdPercent: s.ImageGCLowThresholdPercent, + HighThresholdPercent: int(s.ImageGCHighThresholdPercent), + LowThresholdPercent: int(s.ImageGCLowThresholdPercent), } diskSpacePolicy := kubelet.DiskSpacePolicy{ - DockerFreeDiskMB: s.LowDiskSpaceThresholdMB, - RootFreeDiskMB: s.LowDiskSpaceThresholdMB, + DockerFreeDiskMB: int(s.LowDiskSpaceThresholdMB), + RootFreeDiskMB: int(s.LowDiskSpaceThresholdMB), } manifestURLHeader := make(http.Header) @@ -205,7 +205,7 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) { EnableCustomMetrics: s.EnableCustomMetrics, EnableDebuggingHandlers: s.EnableDebuggingHandlers, EnableServer: s.EnableServer, - EventBurst: s.EventBurst, + EventBurst: int(s.EventBurst), EventRecordQPS: s.EventRecordQPS, FileCheckFrequency: s.FileCheckFrequency.Duration, HostnameOverride: s.HostnameOverride, @@ -218,10 +218,10 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) { ManifestURL: s.ManifestURL, ManifestURLHeader: manifestURLHeader, MasterServiceNamespace: s.MasterServiceNamespace, - MaxContainerCount: s.MaxContainerCount, + MaxContainerCount: int(s.MaxContainerCount), MaxOpenFiles: s.MaxOpenFiles, - MaxPerPodContainerCount: s.MaxPerPodContainerCount, - MaxPods: s.MaxPods, + MaxPerPodContainerCount: int(s.MaxPerPodContainerCount), + MaxPods: int(s.MaxPods), MinimumGCAge: s.MinimumGCAge.Duration, Mounter: mounter, NetworkPluginName: s.NetworkPluginName, @@ -238,7 +238,7 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) { ReadOnlyPort: s.ReadOnlyPort, RegisterNode: s.RegisterNode, RegisterSchedulable: s.RegisterSchedulable, - RegistryBurst: s.RegistryBurst, + RegistryBurst: int(s.RegistryBurst), RegistryPullQPS: s.RegistryPullQPS, ResolverConfig: s.ResolverConfig, Reservation: *reservation, @@ -302,7 +302,7 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) { // make a separate client for events eventClientConfig := *clientConfig eventClientConfig.QPS = s.EventRecordQPS - eventClientConfig.Burst = s.EventBurst + eventClientConfig.Burst = int(s.EventBurst) kcfg.EventClient, err = clientset.NewForConfig(&eventClientConfig) } if err != nil && len(s.APIServerList) > 0 { @@ -349,7 +349,7 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) { // TODO(vmarmol): Do this through container config. 
oomAdjuster := kcfg.OOMAdjuster - if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil { + if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil { glog.Warning(err) } @@ -360,7 +360,7 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) { if s.HealthzPort > 0 { healthz.DefaultHealthz() go wait.Until(func() { - err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(s.HealthzPort)), nil) + err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), nil) if err != nil { glog.Errorf("Starting health server failed: %v", err) } @@ -473,7 +473,7 @@ func CreateAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, clientConfig.ContentType = s.ContentType // Override kubeconfig qps/burst settings from flags clientConfig.QPS = s.KubeAPIQPS - clientConfig.Burst = s.KubeAPIBurst + clientConfig.Burst = int(s.KubeAPIBurst) addChaosToClientConfig(s, clientConfig) return clientConfig, nil @@ -801,7 +801,7 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod } daemonEndpoints := &api.NodeDaemonEndpoints{ - KubeletEndpoint: api.DaemonEndpoint{Port: int(kc.Port)}, + KubeletEndpoint: api.DaemonEndpoint{Port: int32(kc.Port)}, } pc = kc.PodConfig diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go index 8e2868b1d4..ea7cc158f3 100644 --- a/contrib/mesos/pkg/controllermanager/controllermanager.go +++ b/contrib/mesos/pkg/controllermanager/controllermanager.go @@ -127,20 +127,20 @@ func (s *CMServer) Run(_ []string) error { } mux.Handle("/metrics", prometheus.Handler()) server := &http.Server{ - Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)), + Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))), Handler: mux, } glog.Fatal(server.ListenAndServe()) }() endpoints := s.createEndpointController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller"))) - go endpoints.Run(s.ConcurrentEndpointSyncs, wait.NeverStop) + go endpoints.Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop) - go replicationcontroller.NewReplicationManagerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas, s.LookupCacheSizeForRC). - Run(s.ConcurrentRCSyncs, wait.NeverStop) + go replicationcontroller.NewReplicationManagerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas, int(s.LookupCacheSizeForRC)). + Run(int(s.ConcurrentRCSyncs), wait.NeverStop) if s.TerminatedPodGCThreshold > 0 { - go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), s.resyncPeriod, s.TerminatedPodGCThreshold). + go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), s.resyncPeriod, int(s.TerminatedPodGCThreshold)). 
Run(wait.NeverStop) } @@ -154,8 +154,8 @@ func (s *CMServer) Run(_ []string) error { } _, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR) nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")), - s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst), - flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst), + s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)), + flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)), s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs) nodeController.Run(s.NodeSyncPeriod.Duration) @@ -195,7 +195,7 @@ func (s *CMServer) Run(_ []string) error { ReplenishmentResyncPeriod: s.resyncPeriod, ControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactoryFromClient(resourceQuotaControllerClient), } - go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop) + go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop) // If apiserver is not running we should wait for some time and fail only then. This is particularly // important when we start apiserver and controller manager at the same time. @@ -225,7 +225,7 @@ func (s *CMServer) Run(_ []string) error { glog.Fatalf("Failed to get supported resources from server: %v", err) } namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes) - go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop) + go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop) groupVersion := "extensions/v1beta1" resources, found := resourceMap[groupVersion] @@ -248,26 +248,26 @@ func (s *CMServer) Run(_ []string) error { if containsResource(resources, "daemonsets") { glog.Infof("Starting daemon set controller") - go daemon.NewDaemonSetsControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod, s.LookupCacheSizeForDaemonSet). - Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop) + go daemon.NewDaemonSetsControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod, int(s.LookupCacheSizeForDaemonSet)). + Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop) } if containsResource(resources, "jobs") { glog.Infof("Starting job controller") go job.NewJobControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller")), s.resyncPeriod). - Run(s.ConcurrentJobSyncs, wait.NeverStop) + Run(int(s.ConcurrentJobSyncs), wait.NeverStop) } if containsResource(resources, "deployments") { glog.Infof("Starting deployment controller") go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), s.resyncPeriod). 
- Run(s.ConcurrentDeploymentSyncs, wait.NeverStop) + Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop) } if containsResource(resources, "replicasets") { glog.Infof("Starting ReplicaSet controller") - go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas, s.LookupCacheSizeForRS). - Run(s.ConcurrentRSSyncs, wait.NeverStop) + go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)). + Run(int(s.ConcurrentRSSyncs), wait.NeverStop) } } @@ -286,7 +286,7 @@ func (s *CMServer) Run(_ []string) error { pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler( clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")), s.PVClaimBinderSyncPeriod.Duration, - s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry, + int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry), kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration), cloud, ) diff --git a/contrib/mesos/pkg/executor/executor_test.go b/contrib/mesos/pkg/executor/executor_test.go index 89e126e1c6..095511be3f 100644 --- a/contrib/mesos/pkg/executor/executor_test.go +++ b/contrib/mesos/pkg/executor/executor_test.go @@ -514,7 +514,7 @@ func NewTestPod(i int) *api.Pod { Name: "foo", Ports: []api.ContainerPort{ { - ContainerPort: 8000 + i, + ContainerPort: int32(8000 + i), Protocol: api.ProtocolTCP, }, }, diff --git a/contrib/mesos/pkg/executor/service/service.go b/contrib/mesos/pkg/executor/service/service.go index c7c7031c6c..72b644e77a 100644 --- a/contrib/mesos/pkg/executor/service/service.go +++ b/contrib/mesos/pkg/executor/service/service.go @@ -191,7 +191,7 @@ func (s *KubeletExecutorServer) runKubelet( // make a separate client for events eventClientConfig.QPS = s.EventRecordQPS - eventClientConfig.Burst = s.EventBurst + eventClientConfig.Burst = int(s.EventBurst) kcfg.EventClient, err = clientset.NewForConfig(eventClientConfig) if err != nil { return err diff --git a/contrib/mesos/pkg/scheduler/components/binder/binder.go b/contrib/mesos/pkg/scheduler/components/binder/binder.go index 4cb39fa529..7dc9366939 100644 --- a/contrib/mesos/pkg/scheduler/components/binder/binder.go +++ b/contrib/mesos/pkg/scheduler/components/binder/binder.go @@ -140,7 +140,7 @@ func (b *binder) prepareTaskForLaunch(ctx api.Context, machine string, task *pod oemPorts := pod.Spec.Containers[entry.ContainerIdx].Ports ports := append([]api.ContainerPort{}, oemPorts...) 
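The contrib/mesos changes run in the opposite direction: values computed as int (test fixtures like 8000 + i) or carried as uint64 (the Mesos OfferPort) are narrowed into the int32 ContainerPort/HostPort fields. Go's integer conversions wrap silently when a value does not fit, which is harmless here because valid port numbers always fit in an int32. A sketch of that direction, using a stand-in for api.ContainerPort:

package main

import "fmt"

// containerPort stands in for api.ContainerPort after this change.
type containerPort struct {
	ContainerPort int32
	HostPort      int32
}

func main() {
	ports := make([]containerPort, 0, 3)
	for i := 0; i < 3; i++ {
		// Computed as int, narrowed at the assignment, as in NewTestPod above.
		ports = append(ports, containerPort{ContainerPort: int32(8000 + i)})
	}

	var offerPort uint64 = 31000 // Mesos offers expose ports as uint64
	ports[0].HostPort = int32(offerPort)

	fmt.Println(ports)
}
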
p := &ports[entry.PortIdx] - p.HostPort = int(entry.OfferPort) + p.HostPort = int32(entry.OfferPort) op := strconv.FormatUint(entry.OfferPort, 10) pod.Annotations[fmt.Sprintf(annotation.PortMappingKeyFormat, p.Protocol, p.ContainerPort)] = op if p.Name != "" { diff --git a/contrib/mesos/pkg/scheduler/integration/integration_test.go b/contrib/mesos/pkg/scheduler/integration/integration_test.go index 9fc79d7143..c56bef6fa7 100644 --- a/contrib/mesos/pkg/scheduler/integration/integration_test.go +++ b/contrib/mesos/pkg/scheduler/integration/integration_test.go @@ -293,7 +293,7 @@ func NewTestPod() (*api.Pod, int) { { Ports: []api.ContainerPort{ { - ContainerPort: 8000 + currentPodNum, + ContainerPort: int32(8000 + currentPodNum), Protocol: api.ProtocolTCP, }, }, diff --git a/contrib/mesos/pkg/scheduler/service/publish.go b/contrib/mesos/pkg/scheduler/service/publish.go index bb05431023..e3c4206028 100644 --- a/contrib/mesos/pkg/scheduler/service/publish.go +++ b/contrib/mesos/pkg/scheduler/service/publish.go @@ -73,7 +73,7 @@ func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, ser Labels: map[string]string{"provider": "k8sm", "component": "scheduler"}, }, Spec: api.ServiceSpec{ - Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}}, + Ports: []api.ServicePort{{Port: int32(servicePort), Protocol: api.ProtocolTCP}}, // maintained by this code, not by the pod selector Selector: nil, SessionAffinity: api.ServiceAffinityNone, @@ -96,7 +96,7 @@ func (m *SchedulerServer) setEndpoints(serviceName string, ip net.IP, port int) // The setting we want to find. want := []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{IP: ip.String()}}, - Ports: []api.EndpointPort{{Port: port, Protocol: api.ProtocolTCP}}, + Ports: []api.EndpointPort{{Port: int32(port), Protocol: api.ProtocolTCP}}, }} ctx := api.NewDefaultContext() diff --git a/contrib/mesos/pkg/service/endpoints_controller.go b/contrib/mesos/pkg/service/endpoints_controller.go index 1b032c0e0d..0b749b121d 100644 --- a/contrib/mesos/pkg/service/endpoints_controller.go +++ b/contrib/mesos/pkg/service/endpoints_controller.go @@ -320,7 +320,7 @@ func (e *endpointController) syncService(key string) { } // HACK(jdef): use HostIP instead of pod.CurrentState.PodIP for generic mesos compat - epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto} + epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto} epa := api.EndpointAddress{IP: pod.Status.HostIP, TargetRef: &api.ObjectReference{ Kind: "Pod", Namespace: pod.ObjectMeta.Namespace, @@ -416,7 +416,7 @@ func findPort(pod *api.Pod, svcPort *api.ServicePort) (int, int, error) { for _, port := range container.Ports { if port.Name == name && port.Protocol == svcPort.Protocol { hostPort, err := findMappedPortName(pod, port.Protocol, name) - return hostPort, port.ContainerPort, err + return hostPort, int(port.ContainerPort), err } } } @@ -429,9 +429,9 @@ func findPort(pod *api.Pod, svcPort *api.ServicePort) (int, int, error) { p := portName.IntValue() for _, container := range pod.Spec.Containers { for _, port := range container.Ports { - if port.ContainerPort == p && port.Protocol == svcPort.Protocol { + if int(port.ContainerPort) == p && port.Protocol == svcPort.Protocol { hostPort, err := findMappedPort(pod, port.Protocol, p) - return hostPort, port.ContainerPort, err + return hostPort, int(port.ContainerPort), err } } } diff --git a/pkg/api/pod/util.go b/pkg/api/pod/util.go index 97a1b9e6e8..1bdacfe20d 100644 --- 
a/pkg/api/pod/util.go +++ b/pkg/api/pod/util.go @@ -49,7 +49,7 @@ func FindPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) { for _, container := range pod.Spec.Containers { for _, port := range container.Ports { if port.Name == name && port.Protocol == svcPort.Protocol { - return port.ContainerPort, nil + return int(port.ContainerPort), nil } } } diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index 202d503a04..fcc79e0663 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -159,8 +159,8 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) }, func(j *batch.JobSpec, c fuzz.Continue) { c.FuzzNoCustom(j) // fuzz self without calling this function again - completions := int(c.Rand.Int31()) - parallelism := int(c.Rand.Int31()) + completions := int32(c.Rand.Int31()) + parallelism := int32(c.Rand.Int31()) j.Completions = &completions j.Parallelism = ¶llelism if c.Rand.Int31()%2 == 0 { @@ -395,9 +395,9 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) }, func(s *extensions.HorizontalPodAutoscalerSpec, c fuzz.Continue) { c.FuzzNoCustom(s) // fuzz self without calling this function again - minReplicas := int(c.Rand.Int31()) + minReplicas := int32(c.Rand.Int31()) s.MinReplicas = &minReplicas - s.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int(int32(c.RandUint64()))} + s.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int32(c.RandUint64())} }, func(s *extensions.SubresourceReference, c fuzz.Continue) { c.FuzzNoCustom(s) // fuzz self without calling this function again diff --git a/pkg/api/v1/conversion.go b/pkg/api/v1/conversion.go index f99bab435e..90db9fced2 100644 --- a/pkg/api/v1/conversion.go +++ b/pkg/api/v1/conversion.go @@ -236,7 +236,7 @@ func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *R if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ReplicationControllerSpec))(in) } - out.Replicas = int(*in.Replicas) + out.Replicas = *in.Replicas if in.Selector != nil { out.Selector = make(map[string]string) for key, val := range in.Selector { diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index a84fb8d9a7..309721701b 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -994,10 +994,10 @@ func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) fiel } if port.ContainerPort == 0 { allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, PortRangeErrorMsg)) - } else if !validation.IsValidPortNum(port.ContainerPort) { + } else if !validation.IsValidPortNum(int(port.ContainerPort)) { allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, PortRangeErrorMsg)) } - if port.HostPort != 0 && !validation.IsValidPortNum(port.HostPort) { + if port.HostPort != 0 && !validation.IsValidPortNum(int(port.HostPort)) { allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, PortRangeErrorMsg)) } if len(port.Protocol) == 0 { @@ -1808,7 +1808,7 @@ func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService boo } } - if !validation.IsValidPortNum(sp.Port) { + if !validation.IsValidPortNum(int(sp.Port)) { allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, PortRangeErrorMsg)) } @@ -1891,7 +1891,7 @@ func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path } 
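validateContainerPorts and validateServicePort keep calling validation.IsValidPortNum, which still accepts an int, so the int32 fields are widened at the call site. A standalone sketch of that call pattern; isValidPortNum here is a stand-in with the usual 1..65535 range check, not the actual k8s helper:

```go
package main

import "fmt"

// isValidPortNum is a stand-in for validation.IsValidPortNum, which keeps an
// int signature; the real implementation may differ in detail.
func isValidPortNum(port int) bool {
	return 0 < port && port < 65536
}

// ContainerPort is a trimmed copy of the API field layout after the change.
type ContainerPort struct {
	ContainerPort int32
	HostPort      int32
}

// validatePorts widens each int32 field at the call site, as in the diff.
func validatePorts(ports []ContainerPort) []string {
	var errs []string
	for i, p := range ports {
		if !isValidPortNum(int(p.ContainerPort)) {
			errs = append(errs, fmt.Sprintf("ports[%d].containerPort: %d out of range", i, p.ContainerPort))
		}
		if p.HostPort != 0 && !isValidPortNum(int(p.HostPort)) {
			errs = append(errs, fmt.Sprintf("ports[%d].hostPort: %d out of range", i, p.HostPort))
		}
	}
	return errs
}

func main() {
	fmt.Println(validatePorts([]ContainerPort{{ContainerPort: 80}, {ContainerPort: 70000}}))
}
```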
// Validates the given template and ensures that it is in accordance with the desrired selector and replicas. -func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int, fldPath *field.Path) field.ErrorList { +func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if template == nil { allErrs = append(allErrs, field.Required(fldPath, "")) @@ -2656,7 +2656,7 @@ func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *fie allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), port.Name, DNS1123LabelErrorMsg)) } } - if !validation.IsValidPortNum(port.Port) { + if !validation.IsValidPortNum(int(port.Port)) { allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, PortRangeErrorMsg)) } if len(port.Protocol) == 0 { diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index 43f4447255..a9811fdb9c 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -631,7 +631,7 @@ func TestValidatePersistentVolumeClaimUpdate(t *testing.T) { } func TestValidateVolumes(t *testing.T) { - lun := 1 + lun := int32(1) successCase := []api.Volume{ {Name: "abc", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path1"}}}, {Name: "123", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path2"}}}, diff --git a/pkg/apis/autoscaling/v1/conversion.go b/pkg/apis/autoscaling/v1/conversion.go index 286ce3fe8a..56c5f8762a 100644 --- a/pkg/apis/autoscaling/v1/conversion.go +++ b/pkg/apis/autoscaling/v1/conversion.go @@ -88,14 +88,14 @@ func Convert_v1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscale return err } if in.MinReplicas != nil { - out.MinReplicas = new(int) - *out.MinReplicas = int(*in.MinReplicas) + out.MinReplicas = new(int32) + *out.MinReplicas = *in.MinReplicas } else { out.MinReplicas = nil } - out.MaxReplicas = int(in.MaxReplicas) + out.MaxReplicas = in.MaxReplicas if in.TargetCPUUtilizationPercentage != nil { - out.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int(*in.TargetCPUUtilizationPercentage)} + out.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: *in.TargetCPUUtilizationPercentage} } return nil } diff --git a/pkg/apis/batch/v1/conversion.go b/pkg/apis/batch/v1/conversion.go index ca6e0bcf5e..9de9cfcddb 100644 --- a/pkg/apis/batch/v1/conversion.go +++ b/pkg/apis/batch/v1/conversion.go @@ -58,24 +58,9 @@ func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conv if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*batch.JobSpec))(in) } - if in.Parallelism != nil { - out.Parallelism = new(int32) - *out.Parallelism = int32(*in.Parallelism) - } else { - out.Parallelism = nil - } - if in.Completions != nil { - out.Completions = new(int32) - *out.Completions = int32(*in.Completions) - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector if in.Selector != 
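The autoscaling and batch conversion hunks above drop the allocate-and-copy blocks: once both the versioned and internal specs use *int32, the conversion can be a plain pointer assignment, which aliases the source value. A sketch contrasting the two styles with invented v1Spec/internalSpec types:

```go
package main

import "fmt"

type v1Spec struct{ Parallelism *int32 }
type internalSpec struct{ Parallelism *int32 }

// convertCopy is the pre-change style: allocate and copy, needed when the
// pointer types differ (or when aliasing the source is undesirable).
func convertCopy(in *v1Spec, out *internalSpec) {
	if in.Parallelism != nil {
		out.Parallelism = new(int32)
		*out.Parallelism = *in.Parallelism
	} else {
		out.Parallelism = nil
	}
}

// convertAlias is the post-change style from the diff: with identical *int32
// fields on both sides, a plain assignment suffices (it shares the
// pointed-to value with the input).
func convertAlias(in *v1Spec, out *internalSpec) {
	out.Parallelism = in.Parallelism
}

func main() {
	p := int32(3)
	in := &v1Spec{Parallelism: &p}
	var a, b internalSpec
	convertCopy(in, &a)
	convertAlias(in, &b)
	fmt.Println(*a.Parallelism, *b.Parallelism, a.Parallelism == in.Parallelism, b.Parallelism == in.Parallelism)
}
```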
nil { out.Selector = new(LabelSelector) @@ -102,24 +87,9 @@ func Convert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conv if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*JobSpec))(in) } - if in.Parallelism != nil { - out.Parallelism = new(int) - *out.Parallelism = int(*in.Parallelism) - } else { - out.Parallelism = nil - } - if in.Completions != nil { - out.Completions = new(int) - *out.Completions = int(*in.Completions) - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds // unable to generate simple pointer conversion for v1.LabelSelector -> unversioned.LabelSelector if in.Selector != nil { out.Selector = new(unversioned.LabelSelector) diff --git a/pkg/apis/batch/validation/validation_test.go b/pkg/apis/batch/validation/validation_test.go index f7a6392211..5b59a4c6e1 100644 --- a/pkg/apis/batch/validation/validation_test.go +++ b/pkg/apis/batch/validation/validation_test.go @@ -84,7 +84,7 @@ func TestValidateJob(t *testing.T) { t.Errorf("expected success for %s: %v", k, errs) } } - negative := -1 + negative := int32(-1) negative64 := int64(-1) errorCases := map[string]batch.Job{ "spec.parallelism:must be greater than or equal to 0": { diff --git a/pkg/apis/extensions/v1beta1/conversion.go b/pkg/apis/extensions/v1beta1/conversion.go index 4d93b71813..fb311e4e8d 100644 --- a/pkg/apis/extensions/v1beta1/conversion.go +++ b/pkg/apis/extensions/v1beta1/conversion.go @@ -110,7 +110,7 @@ func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ScaleStatus))(in) } - out.Replicas = int(in.Replicas) + out.Replicas = in.Replicas // Normally when 2 fields map to the same internal value we favor the old field, since // old clients can't be expected to know about new fields but clients that know about the @@ -140,8 +140,7 @@ func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions. 
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*extensions.DeploymentSpec))(in) } - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) + out.Replicas = &in.Replicas if in.Selector != nil { out.Selector = new(LabelSelector) if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil { @@ -176,7 +175,7 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentS defaulting.(func(*DeploymentSpec))(in) } if in.Replicas != nil { - out.Replicas = int(*in.Replicas) + out.Replicas = *in.Replicas } if in.Selector != nil { @@ -193,11 +192,8 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentS if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int) - *out.RevisionHistoryLimit = int(*in.RevisionHistoryLimit) - } - out.MinReadySeconds = int(in.MinReadySeconds) + out.RevisionHistoryLimit = in.RevisionHistoryLimit + out.MinReadySeconds = in.MinReadySeconds out.Paused = in.Paused if in.RollbackTo != nil { out.RollbackTo = new(extensions.RollbackConfig) @@ -298,7 +294,7 @@ func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetS defaulting.(func(*ReplicaSetSpec))(in) } if in.Replicas != nil { - out.Replicas = int(*in.Replicas) + out.Replicas = *in.Replicas } if in.Selector != nil { out.Selector = new(unversioned.LabelSelector) @@ -318,24 +314,9 @@ func Convert_batch_JobSpec_To_v1beta1_JobSpec(in *batch.JobSpec, out *JobSpec, s if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*batch.JobSpec))(in) } - if in.Parallelism != nil { - out.Parallelism = new(int32) - *out.Parallelism = int32(*in.Parallelism) - } else { - out.Parallelism = nil - } - if in.Completions != nil { - out.Completions = new(int32) - *out.Completions = int32(*in.Completions) - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector if in.Selector != nil { out.Selector = new(LabelSelector) @@ -370,24 +351,9 @@ func Convert_v1beta1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*JobSpec))(in) } - if in.Parallelism != nil { - out.Parallelism = new(int) - *out.Parallelism = int(*in.Parallelism) - } else { - out.Parallelism = nil - } - if in.Completions != nil { - out.Completions = new(int) - *out.Completions = int(*in.Completions) - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds // unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector if in.Selector != nil { out.Selector = new(unversioned.LabelSelector) diff --git 
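For value-versus-pointer pairs such as DeploymentSpec.Replicas, the diff writes out.Replicas = &in.Replicas in one direction and out.Replicas = *in.Replicas in the other. A small sketch of both directions with hypothetical spec types; note the versioned pointer ends up aliasing the internal field:

```go
package main

import "fmt"

type internalDeploymentSpec struct{ Replicas int32 }
type v1beta1DeploymentSpec struct{ Replicas *int32 }

// toVersioned mirrors out.Replicas = &in.Replicas: once both sides use
// int32, the pointer can simply point at the internal field instead of
// allocating and converting.
func toVersioned(in *internalDeploymentSpec, out *v1beta1DeploymentSpec) {
	out.Replicas = &in.Replicas
}

// fromVersioned mirrors out.Replicas = *in.Replicas, guarded by a nil check.
func fromVersioned(in *v1beta1DeploymentSpec, out *internalDeploymentSpec) {
	if in.Replicas != nil {
		out.Replicas = *in.Replicas
	}
}

func main() {
	in := internalDeploymentSpec{Replicas: 4}
	var v v1beta1DeploymentSpec
	toVersioned(&in, &v)
	in.Replicas = 5 // the versioned object observes this, since it aliases the field
	var back internalDeploymentSpec
	fromVersioned(&v, &back)
	fmt.Println(*v.Replicas, back.Replicas) // 5 5
}
```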
a/pkg/apis/extensions/validation/validation.go b/pkg/apis/extensions/validation/validation.go index 181e13e352..481bbda245 100644 --- a/pkg/apis/extensions/validation/validation.go +++ b/pkg/apis/extensions/validation/validation.go @@ -583,7 +583,7 @@ func ValidateReplicaSetSpec(spec *extensions.ReplicaSetSpec, fldPath *field.Path } // Validates the given template and ensures that it is in accordance with the desired selector and replicas. -func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int, fldPath *field.Path) field.ErrorList { +func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if template == nil { allErrs = append(allErrs, field.Required(fldPath, "")) diff --git a/pkg/apis/extensions/validation/validation_test.go b/pkg/apis/extensions/validation/validation_test.go index 28464bc2e8..51e09597b7 100644 --- a/pkg/apis/extensions/validation/validation_test.go +++ b/pkg/apis/extensions/validation/validation_test.go @@ -41,7 +41,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myrc", Subresource: "scale", }, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, @@ -57,7 +57,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myrc", Subresource: "scale", }, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, }, }, @@ -75,7 +75,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myrc", Subresource: "scale", }, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, }, }, @@ -95,7 +95,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, Spec: extensions.HorizontalPodAutoscalerSpec{ ScaleRef: extensions.SubresourceReference{Name: "myrc", Subresource: "scale"}, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, @@ -107,7 +107,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, Spec: extensions.HorizontalPodAutoscalerSpec{ ScaleRef: extensions.SubresourceReference{Kind: "..", Name: "myrc", Subresource: "scale"}, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, @@ -119,7 +119,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, Spec: extensions.HorizontalPodAutoscalerSpec{ ScaleRef: extensions.SubresourceReference{Kind: "ReplicationController", Subresource: "scale"}, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, @@ -131,7 +131,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, Spec: extensions.HorizontalPodAutoscalerSpec{ ScaleRef: extensions.SubresourceReference{Kind: "ReplicationController", Name: "..", Subresource: "scale"}, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, @@ -143,7 +143,7 @@ 
func TestValidateHorizontalPodAutoscaler(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, Spec: extensions.HorizontalPodAutoscalerSpec{ ScaleRef: extensions.SubresourceReference{Kind: "ReplicationController", Name: "myrc", Subresource: ""}, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, @@ -155,7 +155,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, Spec: extensions.HorizontalPodAutoscalerSpec{ ScaleRef: extensions.SubresourceReference{Kind: "ReplicationController", Name: "myrc", Subresource: ".."}, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, @@ -167,7 +167,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, Spec: extensions.HorizontalPodAutoscalerSpec{ ScaleRef: extensions.SubresourceReference{Kind: "ReplicationController", Name: "myrc", Subresource: "randomsubresource"}, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, @@ -184,7 +184,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { ScaleRef: extensions.SubresourceReference{ Subresource: "scale", }, - MinReplicas: newInt(-1), + MinReplicas: newInt32(-1), MaxReplicas: 5, }, }, @@ -200,7 +200,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { ScaleRef: extensions.SubresourceReference{ Subresource: "scale", }, - MinReplicas: newInt(7), + MinReplicas: newInt32(7), MaxReplicas: 5, }, }, @@ -216,7 +216,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { ScaleRef: extensions.SubresourceReference{ Subresource: "scale", }, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: -70}, }, @@ -238,7 +238,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myrc", Subresource: "scale", }, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, }, }, @@ -259,7 +259,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myrc", Subresource: "scale", }, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, }, }, @@ -280,7 +280,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myrc", Subresource: "scale", }, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, }, }, @@ -301,7 +301,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Name: "myrc", Subresource: "scale", }, - MinReplicas: newInt(1), + MinReplicas: newInt32(1), MaxReplicas: 5, }, }, @@ -1709,8 +1709,8 @@ func TestValidateReplicaSet(t *testing.T) { } } -func newInt(val int) *int { - p := new(int) +func newInt32(val int32) *int32 { + p := new(int32) *p = val return p } diff --git a/pkg/client/record/events_cache.go b/pkg/client/record/events_cache.go index 5d93ba6a68..fa76db7958 100644 --- a/pkg/client/record/events_cache.go +++ b/pkg/client/record/events_cache.go @@ -236,7 +236,7 @@ func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, err event.Name = lastObservation.name event.ResourceVersion = lastObservation.resourceVersion event.FirstTimestamp = lastObservation.firstTimestamp - event.Count = lastObservation.count + 1 + 
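The validation tests above replace newInt with newInt32 because Go cannot take the address of a numeric literal directly; the helper allocates an int32 and returns its address. A sketch of the helper and how a *int32 field such as MinReplicas would be populated with it (hpaSpec below is an invented stand-in):

```go
package main

import "fmt"

// newInt32 matches the shape of the test helper in the diff: it exists only
// because &int32(1) is not valid Go.
func newInt32(val int32) *int32 {
	p := new(int32)
	*p = val
	return p
}

// hpaSpec is a trimmed stand-in for the autoscaler spec used in the tests.
type hpaSpec struct {
	MinReplicas *int32
	MaxReplicas int32
}

func main() {
	spec := hpaSpec{MinReplicas: newInt32(1), MaxReplicas: 5}
	fmt.Println(*spec.MinReplicas, spec.MaxReplicas) // 1 5
}
```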
event.Count = int32(lastObservation.count) + 1 eventCopy2 := *event eventCopy2.Count = 0 @@ -251,7 +251,7 @@ func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, err e.cache.Add( key, eventLog{ - count: event.Count, + count: int(event.Count), firstTimestamp: event.FirstTimestamp, name: event.Name, resourceVersion: event.ResourceVersion, @@ -269,7 +269,7 @@ func (e *eventLogger) updateState(event *api.Event) { e.cache.Add( key, eventLog{ - count: event.Count, + count: int(event.Count), firstTimestamp: event.FirstTimestamp, name: event.Name, resourceVersion: event.ResourceVersion, diff --git a/pkg/client/record/events_cache_test.go b/pkg/client/record/events_cache_test.go index ab88e5e5f7..166550783f 100644 --- a/pkg/client/record/events_cache_test.go +++ b/pkg/client/record/events_cache_test.go @@ -87,7 +87,7 @@ func makeSimilarEvents(num int, template api.Event, messagePrefix string) []api. } func setCount(event api.Event, count int) api.Event { - event.Count = count + event.Count = int32(count) return event } diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index af40aed1dc..f9bab0fe6b 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -717,8 +717,8 @@ func loadBalancerPortRange(ports []api.ServicePort) (string, error) { return "", fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(ports[0].Protocol)) } - minPort := 65536 - maxPort := 0 + minPort := int32(65536) + maxPort := int32(0) for i := range ports { if ports[i].Port < minPort { minPort = ports[i].Port @@ -776,7 +776,7 @@ func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress st // Make sure the allowed ports match. allowedPorts := make([]string, len(ports)) for ix := range ports { - allowedPorts[ix] = strconv.Itoa(ports[ix].Port) + allowedPorts[ix] = strconv.Itoa(int(ports[ix].Port)) } if !slicesEqual(allowedPorts, fw.Allowed[0].Ports) { return true, true, nil @@ -910,7 +910,7 @@ func (gce *GCECloud) updateFirewall(name, region, desc string, sourceRanges nets func (gce *GCECloud) firewallObject(name, region, desc string, sourceRanges netsets.IPNet, ports []api.ServicePort, hosts []*gceInstance) (*compute.Firewall, error) { allowedPorts := make([]string, len(ports)) for ix := range ports { - allowedPorts[ix] = strconv.Itoa(ports[ix].Port) + allowedPorts[ix] = strconv.Itoa(int(ports[ix].Port)) } hostTags, err := gce.computeHostTags(hosts) if err != nil { @@ -1248,7 +1248,7 @@ func (gce *GCECloud) CreateFirewall(name, desc string, sourceRanges netsets.IPNe // if UDP ports are required. This means the method signature will change // forcing downstream clients to refactor interfaces. for _, p := range ports { - svcPorts = append(svcPorts, api.ServicePort{Port: int(p), Protocol: api.ProtocolTCP}) + svcPorts = append(svcPorts, api.ServicePort{Port: int32(p), Protocol: api.ProtocolTCP}) } hosts, err := gce.getInstancesByNames(hostNames) if err != nil { @@ -1282,7 +1282,7 @@ func (gce *GCECloud) UpdateFirewall(name, desc string, sourceRanges netsets.IPNe // if UDP ports are required. This means the method signature will change, // forcing downstream clients to refactor interfaces. 
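The GCE load-balancer hunks type the min/max accumulators as int32 so they compare directly against ServicePort.Port, and only drop to int when formatting with strconv.Itoa. A compact sketch of that scan (ServicePort here is a trimmed stand-in):

```go
package main

import (
	"fmt"
	"strconv"
)

type ServicePort struct{ Port int32 }

// portRange reproduces the min/max scan from the diff in miniature: the
// accumulators are int32 so the comparisons need no casts, and only the
// final formatting narrows to int for strconv.Itoa.
func portRange(ports []ServicePort) string {
	minPort := int32(65536)
	maxPort := int32(0)
	for i := range ports {
		if ports[i].Port < minPort {
			minPort = ports[i].Port
		}
		if ports[i].Port > maxPort {
			maxPort = ports[i].Port
		}
	}
	return strconv.Itoa(int(minPort)) + "-" + strconv.Itoa(int(maxPort))
}

func main() {
	fmt.Println(portRange([]ServicePort{{Port: 80}, {Port: 443}, {Port: 8080}})) // 80-8080
}
```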
for _, p := range ports { - svcPorts = append(svcPorts, api.ServicePort{Port: int(p), Protocol: api.ProtocolTCP}) + svcPorts = append(svcPorts, api.ServicePort{Port: int32(p), Protocol: api.ProtocolTCP}) } hosts, err := gce.getInstancesByNames(hostNames) if err != nil { diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index 059d3b47da..8c5b8ac4cb 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -740,7 +740,7 @@ func (lb *LoadBalancer) EnsureLoadBalancer(apiService *api.Service, hosts []stri _, err = members.Create(lb.network, members.CreateOpts{ PoolID: pool.ID, - ProtocolPort: ports[0].NodePort, //TODO: need to handle multi-port + ProtocolPort: int(ports[0].NodePort), //TODO: need to handle multi-port Address: addr, }).Extract() if err != nil { @@ -774,7 +774,7 @@ func (lb *LoadBalancer) EnsureLoadBalancer(apiService *api.Service, hosts []stri Name: name, Description: fmt.Sprintf("Kubernetes external service %s", name), Protocol: "TCP", - ProtocolPort: ports[0].Port, //TODO: need to handle multi-port + ProtocolPort: int(ports[0].Port), //TODO: need to handle multi-port PoolID: pool.ID, SubnetID: lb.opts.SubnetId, Persistence: persistence, diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index ff043d24e0..405b4500dc 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -576,7 +576,7 @@ func podReadyTime(pod *api.Pod) unversioned.Time { func maxContainerRestarts(pod *api.Pod) int { maxRestarts := 0 for _, c := range pod.Status.ContainerStatuses { - maxRestarts = integer.IntMax(maxRestarts, c.RestartCount) + maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) } return maxRestarts } diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index 3a383caf70..423e42703e 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -60,7 +60,7 @@ func newReplicationController(replicas int) *api.ReplicationController { ResourceVersion: "18", }, Spec: api.ReplicationControllerSpec{ - Replicas: replicas, + Replicas: int32(replicas), Selector: map[string]string{"foo": "bar"}, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ diff --git a/pkg/controller/daemon/controller.go b/pkg/controller/daemon/controller.go index 8e6d970f42..33dc6ab6cd 100644 --- a/pkg/controller/daemon/controller.go +++ b/pkg/controller/daemon/controller.go @@ -553,15 +553,15 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) { } func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error { - if ds.Status.DesiredNumberScheduled == desiredNumberScheduled && ds.Status.CurrentNumberScheduled == currentNumberScheduled && ds.Status.NumberMisscheduled == numberMisscheduled { + if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled && int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled && int(ds.Status.NumberMisscheduled) == numberMisscheduled { return nil } var updateErr, getErr error for i := 0; i <= StatusUpdateRetries; i++ { - ds.Status.DesiredNumberScheduled = desiredNumberScheduled - ds.Status.CurrentNumberScheduled = currentNumberScheduled - ds.Status.NumberMisscheduled = numberMisscheduled + ds.Status.DesiredNumberScheduled = int32(desiredNumberScheduled) + 
ds.Status.CurrentNumberScheduled = int32(currentNumberScheduled) + ds.Status.NumberMisscheduled = int32(numberMisscheduled) _, updateErr = dsClient.UpdateStatus(ds) if updateErr == nil { diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index 5af0be405a..35a22e8f6e 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -1065,12 +1065,12 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep } // cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted. -func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int) ([]*extensions.ReplicaSet, int, error) { +func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) { sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs)) // Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order // such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will // been deleted first and won't increase unavailability. - totalScaledDown := 0 + totalScaledDown := int32(0) for i, targetRS := range oldRSs { if totalScaledDown >= maxCleanupCount { break @@ -1088,7 +1088,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re continue } - scaledDownCount := integer.IntMin(maxCleanupCount-totalScaledDown, targetRS.Spec.Replicas-readyPodCount) + scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(targetRS.Spec.Replicas-readyPodCount))) newReplicasCount := targetRS.Spec.Replicas - scaledDownCount if newReplicasCount > targetRS.Spec.Replicas { return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount) @@ -1105,7 +1105,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re // scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate". // Need check maxUnavailable to ensure availability -func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int, error) { +func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) { _, maxUnavailable, err := deploymentutil.ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas) if err != nil { return 0, err @@ -1126,7 +1126,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [ sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs)) - totalScaledDown := 0 + totalScaledDown := int32(0) totalScaleDownCount := readyPodCount - minAvailable for _, targetRS := range oldRSs { if totalScaledDown >= totalScaleDownCount { @@ -1138,7 +1138,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [ continue } // Scale down. 
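cleanupUnhealthyReplicas now works in int32 but still leans on util/integer.IntMin, which takes ints, hence the int()/int32() round-trip in the diff. A sketch of that arithmetic with a local intMin standing in for the helper:

```go
package main

import "fmt"

// intMin stands in for util/integer.IntMin, which still operates on int.
func intMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// scaledDownCount shows the round-trip used in the diff: the int32 operands
// are narrowed for the helper and the result is cast back. For replica
// counts the values are small, so the conversions are lossless.
func scaledDownCount(maxCleanup, totalScaledDown, specReplicas, readyPods int32) int32 {
	return int32(intMin(int(maxCleanup-totalScaledDown), int(specReplicas-readyPods)))
}

func main() {
	fmt.Println(scaledDownCount(5, 2, 10, 8)) // min(3, 2) = 2
}
```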
- scaleDownCount := integer.IntMin(targetRS.Spec.Replicas, totalScaleDownCount-totalScaledDown) + scaleDownCount := int32(integer.IntMin(int(targetRS.Spec.Replicas), int(totalScaleDownCount-totalScaledDown))) newReplicasCount := targetRS.Spec.Replicas - scaleDownCount if newReplicasCount > targetRS.Spec.Replicas { return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount) @@ -1180,7 +1180,7 @@ func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extension } func (dc *DeploymentController) cleanupOldReplicaSets(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error { - diff := len(oldRSs) - *deployment.Spec.RevisionHistoryLimit + diff := int32(len(oldRSs)) - *deployment.Spec.RevisionHistoryLimit if diff <= 0 { return nil } @@ -1189,7 +1189,7 @@ func (dc *DeploymentController) cleanupOldReplicaSets(oldRSs []*extensions.Repli var errList []error // TODO: This should be parallelized. - for i := 0; i < diff; i++ { + for i := int32(0); i < diff; i++ { rs := oldRSs[i] // Avoid delete replica set with non-zero replica counts if rs.Status.Replicas != 0 || rs.Spec.Replicas != 0 || rs.Generation > rs.Status.ObservedGeneration { @@ -1223,7 +1223,7 @@ func (dc *DeploymentController) updateDeploymentStatus(allRSs []*extensions.Repl return err } -func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (totalActualReplicas, updatedReplicas, availableReplicas, unavailableReplicas int, err error) { +func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (totalActualReplicas, updatedReplicas, availableReplicas, unavailableReplicas int32, err error) { totalActualReplicas = deploymentutil.GetActualReplicaCountForReplicaSets(allRSs) updatedReplicas = deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}) minReadySeconds := deployment.Spec.MinReadySeconds @@ -1237,7 +1237,7 @@ func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, return } -func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { +func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { // No need to scale if rs.Spec.Replicas == newScale { return false, rs, nil @@ -1257,7 +1257,7 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.Rep return true, newRS, err } -func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int) (*extensions.ReplicaSet, error) { +func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32) (*extensions.ReplicaSet, error) { // TODO: Using client for now, update to use store when it is ready. // NOTE: This mutates the ReplicaSet passed in. Not sure if that's a good idea. 
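cleanupOldReplicaSets widens len(oldRSs) to int32 so it can be compared with the *int32 RevisionHistoryLimit, and the deletion loop counter is typed int32 to match. A self-contained sketch of that loop shape (names are illustrative):

```go
package main

import "fmt"

// oldReplicaSetsToDelete mirrors the cleanup loop from the diff: the slice
// length is widened to int32 for the subtraction against the history limit,
// and the loop counter stays int32 so no further casts are needed.
func oldReplicaSetsToDelete(oldRSNames []string, revisionHistoryLimit int32) []string {
	diff := int32(len(oldRSNames)) - revisionHistoryLimit
	if diff <= 0 {
		return nil
	}
	deleted := make([]string, 0, diff)
	for i := int32(0); i < diff; i++ {
		deleted = append(deleted, oldRSNames[i])
	}
	return deleted
}

func main() {
	fmt.Println(oldReplicaSetsToDelete([]string{"rs-1", "rs-2", "rs-3", "rs-4"}, 2)) // [rs-1 rs-2]
}
```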
rs.Spec.Replicas = newScale diff --git a/pkg/controller/deployment/deployment_controller_test.go b/pkg/controller/deployment/deployment_controller_test.go index c2090d0396..293bff7eb1 100644 --- a/pkg/controller/deployment/deployment_controller_test.go +++ b/pkg/controller/deployment/deployment_controller_test.go @@ -39,7 +39,7 @@ func rs(name string, replicas int, selector map[string]string) *exp.ReplicaSet { Name: name, }, Spec: exp.ReplicaSetSpec{ - Replicas: replicas, + Replicas: int32(replicas), Selector: &unversioned.LabelSelector{MatchLabels: selector}, Template: api.PodTemplateSpec{}, }, @@ -49,7 +49,7 @@ func rs(name string, replicas int, selector map[string]string) *exp.ReplicaSet { func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *exp.ReplicaSet { rs := rs(name, specReplicas, selector) rs.Status = exp.ReplicaSetStatus{ - Replicas: statusReplicas, + Replicas: int32(statusReplicas), } return rs } @@ -60,7 +60,7 @@ func deployment(name string, replicas int, maxSurge, maxUnavailable intstr.IntOr Name: name, }, Spec: exp.DeploymentSpec{ - Replicas: replicas, + Replicas: int32(replicas), Strategy: exp.DeploymentStrategy{ Type: exp.RollingUpdateDeploymentStrategyType, RollingUpdate: &exp.RollingUpdateDeployment{ @@ -75,6 +75,11 @@ func deployment(name string, replicas int, maxSurge, maxUnavailable intstr.IntOr var alwaysReady = func() bool { return true } func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment { + var v *int32 + if revisionHistoryLimit != nil { + v = new(int32) + *v = int32(*revisionHistoryLimit) + } d := exp.Deployment{ TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, ObjectMeta: api.ObjectMeta{ @@ -88,7 +93,7 @@ func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment { Type: exp.RollingUpdateDeploymentStrategyType, RollingUpdate: &exp.RollingUpdateDeployment{}, }, - Replicas: replicas, + Replicas: int32(replicas), Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Template: api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -105,7 +110,7 @@ func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment { }, }, }, - RevisionHistoryLimit: revisionHistoryLimit, + RevisionHistoryLimit: v, }, } return &d @@ -118,7 +123,7 @@ func newReplicaSet(d *exp.Deployment, name string, replicas int) *exp.ReplicaSet Namespace: api.NamespaceDefault, }, Spec: exp.ReplicaSetSpec{ - Replicas: replicas, + Replicas: int32(replicas), Template: d.Spec.Template, }, } @@ -211,7 +216,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) { continue } updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*exp.ReplicaSet) - if e, a := test.expectedNewReplicas, updated.Spec.Replicas; e != a { + if e, a := test.expectedNewReplicas, int(updated.Spec.Replicas); e != a { t.Errorf("expected update to %d replicas, got %d", e, a) } } @@ -470,12 +475,12 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) { client: &fakeClientset, eventRecorder: &record.FakeRecorder{}, } - _, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, test.maxCleanupCount) + _, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, int32(test.maxCleanupCount)) if err != nil { t.Errorf("unexpected error: %v", err) continue } - if cleanupCount != test.cleanupCountExpected { + if int(cleanupCount) != test.cleanupCountExpected { t.Errorf("expected %v unhealthy replicas been 
cleaned up, got %v", test.cleanupCountExpected, cleanupCount) continue } @@ -598,7 +603,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing continue } updated := updateAction.GetObject().(*exp.ReplicaSet) - if e, a := test.expectedOldReplicas, updated.Spec.Replicas; e != a { + if e, a := test.expectedOldReplicas, int(updated.Spec.Replicas); e != a { t.Errorf("expected update to %d replicas, got %d", e, a) } } diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index d8b7cd6290..513b8fec0e 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -378,7 +378,7 @@ func (e *EndpointController) syncService(key string) { continue } - epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto} + epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto} epa := api.EndpointAddress{ IP: pod.Status.PodIP, TargetRef: &api.ObjectReference{ diff --git a/pkg/controller/endpoint/endpoints_controller_test.go b/pkg/controller/endpoint/endpoints_controller_test.go index 7efac8e2b2..163ba45de8 100644 --- a/pkg/controller/endpoint/endpoints_controller_test.go +++ b/pkg/controller/endpoint/endpoints_controller_test.go @@ -65,7 +65,7 @@ func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotRea } for j := 0; j < nPorts; j++ { p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports, - api.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: 8080 + j}) + api.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: int32(8080 + j)}) } store.Add(p) } diff --git a/pkg/controller/job/controller.go b/pkg/controller/job/controller.go index ee495555b1..e233471dd2 100644 --- a/pkg/controller/job/controller.go +++ b/pkg/controller/job/controller.go @@ -339,7 +339,7 @@ func (jm *JobController) syncJob(key string) error { } activePods := controller.FilterActivePods(podList.Items) - active := len(activePods) + active := int32(len(activePods)) succeeded, failed := getStatus(podList.Items) conditions := len(job.Status.Conditions) if job.Status.StartTime == nil { @@ -358,9 +358,9 @@ func (jm *JobController) syncJob(key string) error { // some sort of solution to above problem. // kill remaining active pods wait := sync.WaitGroup{} - wait.Add(active) - for i := 0; i < active; i++ { - go func(ix int) { + wait.Add(int(active)) + for i := int32(0); i < active; i++ { + go func(ix int32) { defer wait.Done() if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, &job); err != nil { defer utilruntime.HandleError(err) @@ -449,17 +449,17 @@ func newCondition(conditionType batch.JobConditionType, reason, message string) } // getStatus returns no of succeeded and failed pods running a job -func getStatus(pods []api.Pod) (succeeded, failed int) { - succeeded = filterPods(pods, api.PodSucceeded) - failed = filterPods(pods, api.PodFailed) +func getStatus(pods []api.Pod) (succeeded, failed int32) { + succeeded = int32(filterPods(pods, api.PodSucceeded)) + failed = int32(filterPods(pods, api.PodFailed)) return } // manageJob is the core method responsible for managing the number of running // pods according to what is specified in the job.Spec. 
-func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *batch.Job) int { +func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int32, job *batch.Job) int32 { var activeLock sync.Mutex - active := len(activePods) + active := int32(len(activePods)) parallelism := *job.Spec.Parallelism jobKey, err := controller.KeyFunc(job) if err != nil { @@ -469,7 +469,7 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ba if active > parallelism { diff := active - parallelism - jm.expectations.ExpectDeletions(jobKey, diff) + jm.expectations.ExpectDeletions(jobKey, int(diff)) glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff) // Sort the pods in the order such that not-ready < ready, unscheduled // < scheduled, and pending < running. This ensures that we delete pods @@ -478,9 +478,9 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ba active -= diff wait := sync.WaitGroup{} - wait.Add(diff) - for i := 0; i < diff; i++ { - go func(ix int) { + wait.Add(int(diff)) + for i := int32(0); i < diff; i++ { + go func(ix int32) { defer wait.Done() if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, job); err != nil { defer utilruntime.HandleError(err) @@ -495,7 +495,7 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ba wait.Wait() } else if active < parallelism { - wantActive := 0 + wantActive := int32(0) if job.Spec.Completions == nil { // Job does not specify a number of completions. Therefore, number active // should be equal to parallelism, unless the job has seen at least @@ -518,13 +518,13 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ba glog.Errorf("More active than wanted: job %q, want %d, have %d", jobKey, wantActive, active) diff = 0 } - jm.expectations.ExpectCreations(jobKey, diff) + jm.expectations.ExpectCreations(jobKey, int(diff)) glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff) active += diff wait := sync.WaitGroup{} - wait.Add(diff) - for i := 0; i < diff; i++ { + wait.Add(int(diff)) + for i := int32(0); i < diff; i++ { go func() { defer wait.Done() if err := jm.podControl.CreatePods(job.Namespace, &job.Spec.Template, job); err != nil { diff --git a/pkg/controller/job/controller_test.go b/pkg/controller/job/controller_test.go index f16d4eda5c..2c7d25eaa3 100644 --- a/pkg/controller/job/controller_test.go +++ b/pkg/controller/job/controller_test.go @@ -37,7 +37,7 @@ import ( var alwaysReady = func() bool { return true } -func newJob(parallelism, completions int) *batch.Job { +func newJob(parallelism, completions int32) *batch.Job { j := &batch.Job{ ObjectMeta: api.ObjectMeta{ Name: "foobar", @@ -86,9 +86,9 @@ func getKey(job *batch.Job, t *testing.T) string { } // create count pods with the given phase for the given job -func newPodList(count int, status api.PodPhase, job *batch.Job) []api.Pod { +func newPodList(count int32, status api.PodPhase, job *batch.Job) []api.Pod { pods := []api.Pod{} - for i := 0; i < count; i++ { + for i := int32(0); i < count; i++ { newPod := api.Pod{ ObjectMeta: api.ObjectMeta{ Name: fmt.Sprintf("pod-%v", rand.String(10)), @@ -105,21 +105,21 @@ func newPodList(count int, status api.PodPhase, job *batch.Job) []api.Pod { func TestControllerSyncJob(t *testing.T) { testCases := map[string]struct { // job setup - parallelism int - completions int + parallelism int32 + completions int32 // pod setup 
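manageJob keeps its counts as int32 to match job.Spec.Parallelism, but sync.WaitGroup.Add and the expectations API still take int, so the delta is narrowed exactly once at those boundaries. A minimal sketch of the delete-excess branch (deleteExcess and its callback are invented for illustration):

```go
package main

import (
	"fmt"
	"sync"
)

// deleteExcess sketches the manageJob pattern from the diff: the counts are
// int32, the loop counter is int32, and only the WaitGroup sees an int.
func deleteExcess(active, parallelism int32, deletePod func(ix int32)) int32 {
	if active <= parallelism {
		return active
	}
	diff := active - parallelism
	wg := sync.WaitGroup{}
	wg.Add(int(diff))
	for i := int32(0); i < diff; i++ {
		go func(ix int32) {
			defer wg.Done()
			deletePod(ix)
		}(i)
	}
	wg.Wait()
	return active - diff
}

func main() {
	remaining := deleteExcess(5, 3, func(ix int32) { fmt.Println("deleting pod", ix) })
	fmt.Println("active:", remaining)
}
```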
podControllerError error - activePods int - succeededPods int - failedPods int + activePods int32 + succeededPods int32 + failedPods int32 // expectations - expectedCreations int - expectedDeletions int - expectedActive int - expectedSucceeded int - expectedFailed int + expectedCreations int32 + expectedDeletions int32 + expectedActive int32 + expectedSucceeded int32 + expectedFailed int32 expectedComplete bool }{ "job start": { @@ -237,10 +237,10 @@ func TestControllerSyncJob(t *testing.T) { } // validate created/deleted pods - if len(fakePodControl.Templates) != tc.expectedCreations { + if int32(len(fakePodControl.Templates)) != tc.expectedCreations { t.Errorf("%s: unexpected number of creates. Expected %d, saw %d\n", name, tc.expectedCreations, len(fakePodControl.Templates)) } - if len(fakePodControl.DeletePodName) != tc.expectedDeletions { + if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions { t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName)) } // validate status @@ -266,21 +266,21 @@ func TestControllerSyncJob(t *testing.T) { func TestSyncJobPastDeadline(t *testing.T) { testCases := map[string]struct { // job setup - parallelism int - completions int + parallelism int32 + completions int32 activeDeadlineSeconds int64 startTime int64 // pod setup - activePods int - succeededPods int - failedPods int + activePods int32 + succeededPods int32 + failedPods int32 // expectations - expectedDeletions int - expectedActive int - expectedSucceeded int - expectedFailed int + expectedDeletions int32 + expectedActive int32 + expectedSucceeded int32 + expectedFailed int32 }{ "activeDeadlineSeconds less than single pod execution": { 1, 1, 10, 15, @@ -335,10 +335,10 @@ func TestSyncJobPastDeadline(t *testing.T) { } // validate created/deleted pods - if len(fakePodControl.Templates) != 0 { + if int32(len(fakePodControl.Templates)) != 0 { t.Errorf("%s: unexpected number of creates. Expected 0, saw %d\n", name, len(fakePodControl.Templates)) } - if len(fakePodControl.DeletePodName) != tc.expectedDeletions { + if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions { t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName)) } // validate status diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 5e33493e6f..6f48b95905 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -128,8 +128,8 @@ func (a *HorizontalController) Run(stopCh <-chan struct{}) { glog.Infof("Shutting down HPA Controller") } -func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int, *int, time.Time, error) { - targetUtilization := defaultTargetCPUUtilizationPercentage +func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int32, *int32, time.Time, error) { + targetUtilization := int32(defaultTargetCPUUtilizationPercentage) if hpa.Spec.CPUUtilization != nil { targetUtilization = hpa.Spec.CPUUtilization.TargetPercentage } @@ -155,11 +155,13 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions. 
return 0, nil, time.Time{}, fmt.Errorf("failed to get CPU utilization: %v", err) } - usageRatio := float64(*currentUtilization) / float64(targetUtilization) + utilization := int32(*currentUtilization) + + usageRatio := float64(utilization) / float64(targetUtilization) if math.Abs(1.0-usageRatio) > tolerance { - return int(math.Ceil(usageRatio * float64(currentReplicas))), currentUtilization, timestamp, nil + return int32(math.Ceil(usageRatio * float64(currentReplicas))), &utilization, timestamp, nil } else { - return currentReplicas, currentUtilization, timestamp, nil + return currentReplicas, &utilization, timestamp, nil } } @@ -169,7 +171,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions. // status string (also json-serialized extensions.CustomMetricsCurrentStatusList), // last timestamp of the metrics involved in computations or error, if occurred. func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale, - cmAnnotation string) (replicas int, metric string, status string, timestamp time.Time, err error) { + cmAnnotation string) (replicas int32, metric string, status string, timestamp time.Time, err error) { currentReplicas := scale.Status.Replicas replicas = 0 @@ -216,9 +218,9 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.H floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0 usageRatio := *value / floatTarget - replicaCountProposal := 0 + replicaCountProposal := int32(0) if math.Abs(1.0-usageRatio) > tolerance { - replicaCountProposal = int(math.Ceil(usageRatio * float64(currentReplicas))) + replicaCountProposal = int32(math.Ceil(usageRatio * float64(currentReplicas))) } else { replicaCountProposal = currentReplicas } @@ -254,16 +256,16 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod } currentReplicas := scale.Status.Replicas - cpuDesiredReplicas := 0 - var cpuCurrentUtilization *int = nil + cpuDesiredReplicas := int32(0) + var cpuCurrentUtilization *int32 = nil cpuTimestamp := time.Time{} - cmDesiredReplicas := 0 + cmDesiredReplicas := int32(0) cmMetric := "" cmStatus := "" cmTimestamp := time.Time{} - desiredReplicas := 0 + desiredReplicas := int32(0) rescaleReason := "" timestamp := time.Now() @@ -347,7 +349,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale) } -func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, timestamp time.Time) bool { +func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool { if desiredReplicas != currentReplicas { // Going down only if the usageRatio dropped significantly below the target // and there was no rescaling in the last downscaleForbiddenWindow. 
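The HPA changes type replica proposals as int32; the ratio math stays in float64 and the result is rounded up and narrowed. A sketch of that core computation, with an illustrative tolerance constant rather than the controller's actual value:

```go
package main

import (
	"fmt"
	"math"
)

const tolerance = 0.1 // illustrative, not necessarily the controller's setting

// desiredReplicas reproduces the core arithmetic of
// computeReplicasForCPUUtilization after the int32 change: compute the usage
// ratio in float64, then round up and narrow the proposal to int32.
func desiredReplicas(currentReplicas, currentUtilization, targetUtilization int32) int32 {
	usageRatio := float64(currentUtilization) / float64(targetUtilization)
	if math.Abs(1.0-usageRatio) > tolerance {
		return int32(math.Ceil(usageRatio * float64(currentReplicas)))
	}
	return currentReplicas
}

func main() {
	fmt.Println(desiredReplicas(3, 90, 50)) // ratio 1.8 -> ceil(5.4) = 6
	fmt.Println(desiredReplicas(3, 52, 50)) // within tolerance -> 3
}
```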
@@ -368,14 +370,14 @@ func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desir return false } -func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas int) { +func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas int32) { err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false) if err != nil { glog.Errorf("%v", err) } } -func (a *HorizontalController) updateStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, cpuCurrentUtilization *int, cmStatus string, rescale bool) error { +func (a *HorizontalController) updateStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, cpuCurrentUtilization *int32, cmStatus string, rescale bool) error { hpa.Status = extensions.HorizontalPodAutoscalerStatus{ CurrentReplicas: currentReplicas, DesiredReplicas: desiredReplicas, diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go index 0c6be1fa03..827f52f699 100644 --- a/pkg/controller/podautoscaler/horizontal_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -67,14 +67,14 @@ type fakeResource struct { type testCase struct { sync.Mutex - minReplicas int - maxReplicas int - initialReplicas int - desiredReplicas int + minReplicas int32 + maxReplicas int32 + initialReplicas int32 + desiredReplicas int32 // CPU target utilization as a percentage of the requested resources. - CPUTarget int - CPUCurrent int + CPUTarget int32 + CPUCurrent int32 verifyCPUCurrent bool reportedLevels []uint64 reportedCPURequests []resource.Quantity @@ -103,7 +103,7 @@ func (tc *testCase) computeCPUCurrent() { for _, req := range tc.reportedCPURequests { requested += int(req.MilliValue()) } - tc.CPUCurrent = 100 * reported / requested + tc.CPUCurrent = int32(100 * reported / requested) } func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset { diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go index 6407dc0238..d1bdffb3ef 100644 --- a/pkg/controller/replicaset/replica_set.go +++ b/pkg/controller/replicaset/replica_set.go @@ -421,7 +421,7 @@ func (rsc *ReplicaSetController) worker() { // manageReplicas checks and updates replicas for the given ReplicaSet. 
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *extensions.ReplicaSet) { - diff := len(filteredPods) - rs.Spec.Replicas + diff := len(filteredPods) - int(rs.Spec.Replicas) rsKey, err := controller.KeyFunc(rs) if err != nil { glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err) diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index 86ae6e8cc0..05a71f0247 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -66,7 +66,7 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl ResourceVersion: "18", }, Spec: extensions.ReplicaSetSpec{ - Replicas: replicas, + Replicas: int32(replicas), Selector: &unversioned.LabelSelector{MatchLabels: selectorMap}, Template: api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -237,7 +237,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) { labelMap := map[string]string{"foo": "bar"} rs := newReplicaSet(activePods, labelMap) manager.rsStore.Store.Add(rs) - rs.Status = extensions.ReplicaSetStatus{Replicas: activePods} + rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods)} newPodList(manager.podStore.Store, activePods, api.PodRunning, labelMap, rs, "pod") fakePodControl := controller.FakePodControl{} @@ -643,7 +643,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) { // returned a ReplicaSet with replicas=1. if c, ok := action.GetObject().(*extensions.ReplicaSet); !ok { t.Errorf("Expected a ReplicaSet as the argument to update, got %T", c) - } else if c.Status.Replicas != numReplicas { + } else if int(c.Status.Replicas) != numReplicas { t.Errorf("Expected update for ReplicaSet to contain replicas %v, got %v instead", numReplicas, c.Status.Replicas) } @@ -669,7 +669,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) rsSpec := newReplicaSet(numReplicas, labelMap) manager.rsStore.Store.Add(rsSpec) - expectedPods := 0 + expectedPods := int32(0) pods := newPodList(nil, numReplicas, api.PodPending, labelMap, rsSpec, "pod") rsKey, err := controller.KeyFunc(rsSpec) @@ -678,7 +678,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) } // Size up the controller, then size it down, and confirm the expected create/delete pattern - for _, replicas := range []int{numReplicas, 0} { + for _, replicas := range []int32{int32(numReplicas), 0} { rsSpec.Spec.Replicas = replicas manager.rsStore.Store.Add(rsSpec) @@ -688,21 +688,21 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) // The store accrues active pods. It's also used by the ReplicaSet to determine how many // replicas to create. - activePods := len(manager.podStore.Store.List()) + activePods := int32(len(manager.podStore.Store.List())) if replicas != 0 { // This is the number of pods currently "in flight". They were created by the // ReplicaSet controller above, which then puts the ReplicaSet to sleep till // all of them have been observed. expectedPods = replicas - activePods - if expectedPods > burstReplicas { - expectedPods = burstReplicas + if expectedPods > int32(burstReplicas) { + expectedPods = int32(burstReplicas) } // This validates the ReplicaSet manager sync actually created pods - validateSyncReplicaSet(t, &fakePodControl, expectedPods, 0) + validateSyncReplicaSet(t, &fakePodControl, int(expectedPods), 0) // This simulates the watch events for all but 1 of the expected pods. 
// None of these should wake the controller because it has expectations==BurstReplicas. - for i := 0; i < expectedPods-1; i++ { + for i := int32(0); i < expectedPods-1; i++ { manager.podStore.Store.Add(&pods.Items[i]) manager.addPod(&pods.Items[i]) } @@ -716,10 +716,10 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) } } else { expectedPods = (replicas - activePods) * -1 - if expectedPods > burstReplicas { - expectedPods = burstReplicas + if expectedPods > int32(burstReplicas) { + expectedPods = int32(burstReplicas) } - validateSyncReplicaSet(t, &fakePodControl, 0, expectedPods) + validateSyncReplicaSet(t, &fakePodControl, 0, int(expectedPods)) // To accurately simulate a watch we must delete the exact pods // the rs is waiting for. @@ -782,12 +782,12 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) } // Confirm that we've created the right number of replicas - activePods := len(manager.podStore.Store.List()) + activePods := int32(len(manager.podStore.Store.List())) if activePods != rsSpec.Spec.Replicas { t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods) } // Replenish the pod list, since we cut it down sizing up - pods = newPodList(nil, replicas, api.PodRunning, labelMap, rsSpec, "pod") + pods = newPodList(nil, int(replicas), api.PodRunning, labelMap, rsSpec, "pod") } } diff --git a/pkg/controller/replicaset/replica_set_utils.go b/pkg/controller/replicaset/replica_set_utils.go index 382f2aee44..fd8bd70626 100644 --- a/pkg/controller/replicaset/replica_set_utils.go +++ b/pkg/controller/replicaset/replica_set_utils.go @@ -31,8 +31,8 @@ func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.Repli // This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since // we do a periodic relist every 30s. If the generations differ but the replicas are // the same, a caller might've resized to the same replica count. - if rs.Status.Replicas == numReplicas && - rs.Status.FullyLabeledReplicas == numFullyLabeledReplicas && + if int(rs.Status.Replicas) == numReplicas && + int(rs.Status.FullyLabeledReplicas) == numFullyLabeledReplicas && rs.Generation == rs.Status.ObservedGeneration { return nil } @@ -49,7 +49,7 @@ func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.Repli fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, numFullyLabeledReplicas) + fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, generation)) - rs.Status = extensions.ReplicaSetStatus{Replicas: numReplicas, FullyLabeledReplicas: numFullyLabeledReplicas, ObservedGeneration: generation} + rs.Status = extensions.ReplicaSetStatus{Replicas: int32(numReplicas), FullyLabeledReplicas: int32(numFullyLabeledReplicas), ObservedGeneration: generation} _, updateErr = rsClient.UpdateStatus(rs) if updateErr == nil || i >= statusUpdateRetries { return updateErr diff --git a/pkg/controller/replication/replication_controller.go b/pkg/controller/replication/replication_controller.go index 1d716672ac..bd5b52c7ea 100644 --- a/pkg/controller/replication/replication_controller.go +++ b/pkg/controller/replication/replication_controller.go @@ -429,7 +429,7 @@ func (rm *ReplicationManager) worker() { // manageReplicas checks and updates replicas for the given replication controller. 
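updateReplicaCount compares int32 status fields against plain-int pod counts by widening the status side, and narrows once when writing the new status back. A small sketch of that steady-state check with a trimmed status struct:

```go
package main

import "fmt"

// replicaSetStatus is a trimmed stand-in for the API status type.
type replicaSetStatus struct {
	Replicas             int32
	FullyLabeledReplicas int32
}

// needsStatusUpdate mirrors the steady-state check: observed counts arrive
// as plain ints (lengths of pod slices), so the status side is widened for
// the comparison.
func needsStatusUpdate(status replicaSetStatus, numReplicas, numFullyLabeled int) bool {
	return int(status.Replicas) != numReplicas ||
		int(status.FullyLabeledReplicas) != numFullyLabeled
}

// applyCounts mirrors the assignment side, narrowing once on write-back.
func applyCounts(status *replicaSetStatus, numReplicas, numFullyLabeled int) {
	status.Replicas = int32(numReplicas)
	status.FullyLabeledReplicas = int32(numFullyLabeled)
}

func main() {
	st := replicaSetStatus{Replicas: 3, FullyLabeledReplicas: 3}
	fmt.Println(needsStatusUpdate(st, 4, 3)) // true
	applyCounts(&st, 4, 3)
	fmt.Println(needsStatusUpdate(st, 4, 3)) // false
}
```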
func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) { - diff := len(filteredPods) - rc.Spec.Replicas + diff := len(filteredPods) - int(rc.Spec.Replicas) rcKey, err := controller.KeyFunc(rc) if err != nil { glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) diff --git a/pkg/controller/replication/replication_controller_test.go b/pkg/controller/replication/replication_controller_test.go index 83cf06d2e6..c974513bd9 100644 --- a/pkg/controller/replication/replication_controller_test.go +++ b/pkg/controller/replication/replication_controller_test.go @@ -65,7 +65,7 @@ func newReplicationController(replicas int) *api.ReplicationController { ResourceVersion: "18", }, Spec: api.ReplicationControllerSpec{ - Replicas: replicas, + Replicas: int32(replicas), Selector: map[string]string{"foo": "bar"}, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -231,7 +231,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) { activePods := 5 rc := newReplicationController(activePods) manager.rcStore.Store.Add(rc) - rc.Status = api.ReplicationControllerStatus{Replicas: activePods} + rc.Status = api.ReplicationControllerStatus{Replicas: int32(activePods)} newPodList(manager.podStore.Store, activePods, api.PodRunning, rc, "pod") fakePodControl := controller.FakePodControl{} @@ -628,7 +628,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) { // returned an rc with replicas=1. if c, ok := action.GetObject().(*api.ReplicationController); !ok { t.Errorf("Expected an rc as the argument to update, got %T", c) - } else if c.Status.Replicas != numReplicas { + } else if c.Status.Replicas != int32(numReplicas) { t.Errorf("Expected update for rc to contain replicas %v, got %v instead", numReplicas, c.Status.Replicas) } @@ -664,7 +664,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) // Size up the controller, then size it down, and confirm the expected create/delete pattern for _, replicas := range []int{numReplicas, 0} { - controllerSpec.Spec.Replicas = replicas + controllerSpec.Spec.Replicas = int32(replicas) manager.rcStore.Store.Add(controllerSpec) for i := 0; i < numReplicas; i += burstReplicas { @@ -765,7 +765,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) } // Confirm that we've created the right number of replicas - activePods := len(manager.podStore.Store.List()) + activePods := int32(len(manager.podStore.Store.List())) if activePods != controllerSpec.Spec.Replicas { t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods) } diff --git a/pkg/controller/replication/replication_controller_utils.go b/pkg/controller/replication/replication_controller_utils.go index 0af2530c1f..0383fa9464 100644 --- a/pkg/controller/replication/replication_controller_utils.go +++ b/pkg/controller/replication/replication_controller_utils.go @@ -31,8 +31,8 @@ func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface, // This is the steady state. It happens when the rc doesn't have any expectations, since // we do a periodic relist every 30s. If the generations differ but the replicas are // the same, a caller might've resized to the same replica count. 
- if controller.Status.Replicas == numReplicas && - controller.Status.FullyLabeledReplicas == numFullyLabeledReplicas && + if int(controller.Status.Replicas) == numReplicas && + int(controller.Status.FullyLabeledReplicas) == numFullyLabeledReplicas && controller.Generation == controller.Status.ObservedGeneration { return nil } @@ -49,7 +49,7 @@ func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface, fmt.Sprintf("fullyLabeledReplicas %d->%d, ", controller.Status.FullyLabeledReplicas, numFullyLabeledReplicas) + fmt.Sprintf("sequence No: %v->%v", controller.Status.ObservedGeneration, generation)) - rc.Status = api.ReplicationControllerStatus{Replicas: numReplicas, FullyLabeledReplicas: numFullyLabeledReplicas, ObservedGeneration: generation} + rc.Status = api.ReplicationControllerStatus{Replicas: int32(numReplicas), FullyLabeledReplicas: int32(numFullyLabeledReplicas), ObservedGeneration: generation} _, updateErr = rcClient.UpdateStatus(rc) if updateErr == nil || i >= statusUpdateRetries { return updateErr diff --git a/pkg/kubectl/autoscale.go b/pkg/kubectl/autoscale.go index e41058d4e8..5aa9b0bfd9 100644 --- a/pkg/kubectl/autoscale.go +++ b/pkg/kubectl/autoscale.go @@ -97,14 +97,15 @@ func (HorizontalPodAutoscalerV1Beta1) Generate(genericParams map[string]interfac APIVersion: params["scaleRef-apiVersion"], Subresource: scaleSubResource, }, - MaxReplicas: max, + MaxReplicas: int32(max), }, } if min > 0 { - scaler.Spec.MinReplicas = &min + v := int32(min) + scaler.Spec.MinReplicas = &v } if cpu >= 0 { - scaler.Spec.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: cpu} + scaler.Spec.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int32(cpu)} } return &scaler, nil } diff --git a/pkg/kubectl/cmd/clusterinfo.go b/pkg/kubectl/cmd/clusterinfo.go index 66843bf317..9fc3035865 100644 --- a/pkg/kubectl/cmd/clusterinfo.go +++ b/pkg/kubectl/cmd/clusterinfo.go @@ -83,7 +83,7 @@ func RunClusterInfo(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command) error ip = ingress.Hostname } for _, port := range service.Spec.Ports { - link += "http://" + ip + ":" + strconv.Itoa(port.Port) + " " + link += "http://" + ip + ":" + strconv.Itoa(int(port.Port)) + " " } } else { if len(client.GroupVersion.Group) == 0 { diff --git a/pkg/kubectl/cmd/create.go b/pkg/kubectl/cmd/create.go index 81eedf5ef1..05cc2e3de2 100644 --- a/pkg/kubectl/cmd/create.go +++ b/pkg/kubectl/cmd/create.go @@ -173,7 +173,7 @@ See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more d func makePortsString(ports []api.ServicePort, useNodePort bool) string { pieces := make([]string, len(ports)) for ix := range ports { - var port int + var port int32 if useNodePort { port = ports[ix].NodePort } else { diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 35c585ed8c..cadf469297 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -743,7 +743,7 @@ func getPorts(spec api.PodSpec) []string { result := []string{} for _, container := range spec.Containers { for _, port := range container.Ports { - result = append(result, strconv.Itoa(port.ContainerPort)) + result = append(result, strconv.Itoa(int(port.ContainerPort))) } } return result @@ -753,7 +753,7 @@ func getPorts(spec api.PodSpec) []string { func getServicePorts(spec api.ServiceSpec) []string { result := []string{} for _, servicePort := range spec.Ports { - result = append(result, strconv.Itoa(servicePort.Port)) + result = append(result, 
strconv.Itoa(int(servicePort.Port))) } return result } diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 0a66b987db..be91bf85ad 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -1233,7 +1233,7 @@ func (i *IngressDescriber) describeBackend(ns string, backend *extensions.Ingres spName = sp.Name } case intstr.Int: - if int(backend.ServicePort.IntVal) == sp.Port { + if int32(backend.ServicePort.IntVal) == sp.Port { spName = sp.Name } } diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index 2230588fcf..692d2c78c1 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -596,7 +596,7 @@ func printPodBase(pod *api.Pod, w io.Writer, options PrintOptions) error { for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { container := pod.Status.ContainerStatuses[i] - restarts += container.RestartCount + restarts += int(container.RestartCount) if container.State.Waiting != nil && container.State.Waiting.Reason != "" { reason = container.State.Waiting.Reason } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { diff --git a/pkg/kubectl/resource_printer_test.go b/pkg/kubectl/resource_printer_test.go index f9905d35f2..f0a30edeeb 100644 --- a/pkg/kubectl/resource_printer_test.go +++ b/pkg/kubectl/resource_printer_test.go @@ -1342,7 +1342,7 @@ func TestPrintDaemonSet(t *testing.T) { } func TestPrintJob(t *testing.T) { - completions := 2 + completions := int32(2) tests := []struct { job batch.Job expect string diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go index e69d889a73..bdbd078cbe 100644 --- a/pkg/kubectl/rolling_updater.go +++ b/pkg/kubectl/rolling_updater.go @@ -114,7 +114,7 @@ type RollingUpdater struct { // cleanup performs post deployment cleanup tasks for newRc and oldRc. cleanup func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error // getReadyPods returns the amount of old and new ready pods. - getReadyPods func(oldRc, newRc *api.ReplicationController) (int, int, error) + getReadyPods func(oldRc, newRc *api.ReplicationController) (int32, int32, error) } // NewRollingUpdater creates a RollingUpdater from a client. @@ -169,11 +169,12 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { fmt.Fprintf(out, "Created %s\n", newRc.Name) } // Extract the desired replica count from the controller. - desired, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation]) + desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation]) if err != nil { return fmt.Errorf("Unable to parse annotation for %s: %s=%s", newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation]) } + desired := int32(desiredAnnotation) // Extract the original replica count from the old controller, adding the // annotation if it doesn't yet exist. 
_, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation] @@ -185,7 +186,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { if existing.Annotations == nil { existing.Annotations = map[string]string{} } - existing.Annotations[originalReplicasAnnotation] = strconv.Itoa(existing.Spec.Replicas) + existing.Annotations[originalReplicasAnnotation] = strconv.Itoa(int(existing.Spec.Replicas)) updated, err := r.c.ReplicationControllers(existing.Namespace).Update(existing) if err != nil { return err @@ -204,7 +205,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { } // The minumum pods which must remain available througout the update // calculated for internal convenience. - minAvailable := integer.IntMax(0, desired-maxUnavailable) + minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable))) // If the desired new scale is 0, then the max unavailable is necessarily // the effective scale of the old RC regardless of the configuration // (equivalent to 100% maxUnavailable). @@ -258,7 +259,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { // scaleUp scales up newRc to desired by whatever increment is possible given // the configured surge threshold. scaleUp will safely no-op as necessary when // it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) { +func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) { // If we're already at the desired, do nothing. if newRc.Spec.Replicas == desired { return newRc, nil @@ -291,7 +292,7 @@ func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desire // scaleDown scales down oldRc to 0 at whatever decrement possible given the // thresholds defined on the config. scaleDown will safely no-op as necessary // when it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int, config *RollingUpdaterConfig) (*api.ReplicationController, error) { +func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*api.ReplicationController, error) { // Already scaled down; do nothing. if oldRc.Spec.Replicas == 0 { return oldRc, nil @@ -356,10 +357,10 @@ func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, r // readyPods returns the old and new ready counts for their pods. // If a pod is observed as being ready, it's considered ready even // if it later becomes notReady. 
-func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int, int, error) { +func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int32, int32, error) { controllers := []*api.ReplicationController{oldRc, newRc} - oldReady := 0 - newReady := 0 + oldReady := int32(0) + newReady := int32(0) for i := range controllers { controller := controllers[i] diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index 727e66abca..3daf289d45 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -48,7 +48,7 @@ func oldRc(replicas int, original int) *api.ReplicationController { }, }, Spec: api.ReplicationControllerSpec{ - Replicas: replicas, + Replicas: int32(replicas), Selector: map[string]string{"version": "v1"}, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -58,7 +58,7 @@ func oldRc(replicas int, original int) *api.ReplicationController { }, }, Status: api.ReplicationControllerStatus{ - Replicas: replicas, + Replicas: int32(replicas), }, } } @@ -794,7 +794,7 @@ Scaling foo-v2 up to 2 } if expected == -1 { t.Fatalf("unexpected scale of %s to %d", rc.Name, rc.Spec.Replicas) - } else if e, a := expected, rc.Spec.Replicas; e != a { + } else if e, a := expected, int(rc.Spec.Replicas); e != a { t.Fatalf("expected scale of %s to %d, got %d", rc.Name, e, a) } // Simulate the scale. @@ -810,7 +810,7 @@ Scaling foo-v2 up to 2 }, } // Set up a mock readiness check which handles the test assertions. - updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int, int, error) { + updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int32, int32, error) { // Return simulated readiness, and throw an error if this call has no // expectations defined. oldReady := next(&oldReady) @@ -818,7 +818,7 @@ Scaling foo-v2 up to 2 if oldReady == -1 || newReady == -1 { t.Fatalf("unexpected getReadyPods call for:\noldRc: %+v\nnewRc: %+v", oldRc, newRc) } - return oldReady, newReady, nil + return int32(oldReady), int32(newReady), nil } var buffer bytes.Buffer config := &RollingUpdaterConfig{ @@ -860,7 +860,7 @@ func TestUpdate_progressTimeout(t *testing.T) { return nil }, } - updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int, int, error) { + updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int32, int32, error) { // Coerce a timeout by pods never becoming ready. 
return 0, 0, nil } @@ -913,7 +913,7 @@ func TestUpdate_assignOriginalAnnotation(t *testing.T) { cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error { return nil }, - getReadyPods: func(oldRc, newRc *api.ReplicationController) (int, int, error) { + getReadyPods: func(oldRc, newRc *api.ReplicationController) (int32, int32, error) { return 1, 1, nil }, } @@ -1573,8 +1573,8 @@ func TestRollingUpdater_readyPods(t *testing.T) { oldRc *api.ReplicationController newRc *api.ReplicationController // expectated old/new ready counts - oldReady int - newReady int + oldReady int32 + newReady int32 // pods owned by the rcs; indicate whether they're ready oldPods []bool newPods []bool diff --git a/pkg/kubectl/run.go b/pkg/kubectl/run.go index 0fc3ec85eb..f24641e120 100644 --- a/pkg/kubectl/run.go +++ b/pkg/kubectl/run.go @@ -105,7 +105,7 @@ func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime Labels: labels, }, Spec: extensions.DeploymentSpec{ - Replicas: count, + Replicas: int32(count), Selector: &unversioned.LabelSelector{MatchLabels: labels}, Template: api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -605,7 +605,7 @@ func (BasicReplicationController) Generate(genericParams map[string]interface{}) Labels: labels, }, Spec: api.ReplicationControllerSpec{ - Replicas: count, + Replicas: int32(count), Selector: labels, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -680,11 +680,11 @@ func updatePodPorts(params map[string]string, podSpec *api.PodSpec) (err error) if port > 0 { podSpec.Containers[0].Ports = []api.ContainerPort{ { - ContainerPort: port, + ContainerPort: int32(port), }, } if hostPort > 0 { - podSpec.Containers[0].Ports[0].HostPort = hostPort + podSpec.Containers[0].Ports[0].HostPort = int32(hostPort) } } return nil diff --git a/pkg/kubectl/scale.go b/pkg/kubectl/scale.go index 7e9b6ba190..bef93d9909 100644 --- a/pkg/kubectl/scale.go +++ b/pkg/kubectl/scale.go @@ -129,8 +129,8 @@ func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name s // ValidateReplicationController ensures that the preconditions match. Returns nil if they are valid, an error otherwise func (precondition *ScalePrecondition) ValidateReplicationController(controller *api.ReplicationController) error { - if precondition.Size != -1 && controller.Spec.Replicas != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(controller.Spec.Replicas)} + if precondition.Size != -1 && int(controller.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(controller.Spec.Replicas))} } if len(precondition.ResourceVersion) != 0 && controller.ResourceVersion != precondition.ResourceVersion { return PreconditionError{"resource version", precondition.ResourceVersion, controller.ResourceVersion} @@ -152,7 +152,7 @@ func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, p return err } } - controller.Spec.Replicas = int(newSize) + controller.Spec.Replicas = int32(newSize) // TODO: do retry on 409 errors here? if _, err := scaler.c.ReplicationControllers(namespace).Update(controller); err != nil { if errors.IsInvalid(err) { @@ -191,8 +191,8 @@ func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize // ValidateReplicaSet ensures that the preconditions match. 
Returns nil if they are valid, an error otherwise func (precondition *ScalePrecondition) ValidateReplicaSet(replicaSet *extensions.ReplicaSet) error { - if precondition.Size != -1 && replicaSet.Spec.Replicas != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(replicaSet.Spec.Replicas)} + if precondition.Size != -1 && int(replicaSet.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(replicaSet.Spec.Replicas))} } if len(precondition.ResourceVersion) != 0 && replicaSet.ResourceVersion != precondition.ResourceVersion { return PreconditionError{"resource version", precondition.ResourceVersion, replicaSet.ResourceVersion} @@ -214,7 +214,7 @@ func (scaler *ReplicaSetScaler) ScaleSimple(namespace, name string, precondition return err } } - rs.Spec.Replicas = int(newSize) + rs.Spec.Replicas = int32(newSize) // TODO: do retry on 409 errors here? if _, err := scaler.c.ReplicaSets(namespace).Update(rs); err != nil { if errors.IsInvalid(err) { @@ -256,8 +256,8 @@ func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error { if precondition.Size != -1 && job.Spec.Parallelism == nil { return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"} } - if precondition.Size != -1 && *job.Spec.Parallelism != precondition.Size { - return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(*job.Spec.Parallelism)} + if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size { + return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))} } if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion { return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion} @@ -280,7 +280,7 @@ func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *Scal return err } } - parallelism := int(newSize) + parallelism := int32(newSize) job.Spec.Parallelism = ¶llelism if _, err := scaler.c.Jobs(namespace).Update(job); err != nil { if errors.IsInvalid(err) { @@ -319,8 +319,8 @@ func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditio // ValidateDeployment ensures that the preconditions match. Returns nil if they are valid, an error otherwise. func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions.Deployment) error { - if precondition.Size != -1 && deployment.Spec.Replicas != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(deployment.Spec.Replicas)} + if precondition.Size != -1 && int(deployment.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(deployment.Spec.Replicas))} } if len(precondition.ResourceVersion) != 0 && deployment.ResourceVersion != precondition.ResourceVersion { return PreconditionError{"resource version", precondition.ResourceVersion, deployment.ResourceVersion} @@ -346,7 +346,7 @@ func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, precondition // TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528). // For now I'm falling back to regular Deployment update operation. 
- deployment.Spec.Replicas = int(newSize) + deployment.Spec.Replicas = int32(newSize) if _, err := scaler.c.Deployments(namespace).Update(deployment); err != nil { if errors.IsInvalid(err) { return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err} diff --git a/pkg/kubectl/scale_test.go b/pkg/kubectl/scale_test.go index d1af0105d3..f3ccb3ac98 100644 --- a/pkg/kubectl/scale_test.go +++ b/pkg/kubectl/scale_test.go @@ -107,7 +107,7 @@ func TestReplicationControllerScale(t *testing.T) { if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "replicationcontrollers" || action.GetName() != name { t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) } - if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "replicationcontrollers" || action.GetObject().(*api.ReplicationController).Spec.Replicas != int(count) { + if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "replicationcontrollers" || action.GetObject().(*api.ReplicationController).Spec.Replicas != int32(count) { t.Errorf("unexpected action %v, expected update-replicationController with replicas = %d", actions[1], count) } } @@ -261,7 +261,7 @@ func (c *ErrorJobs) Update(job *batch.Job) (*batch.Job, error) { } func (c *ErrorJobs) Get(name string) (*batch.Job, error) { - zero := 0 + zero := int32(0) return &batch.Job{ Spec: batch.JobSpec{ Parallelism: &zero, @@ -317,7 +317,7 @@ func TestJobScale(t *testing.T) { if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "jobs" || action.GetName() != name { t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) } - if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "jobs" || *action.GetObject().(*batch.Job).Spec.Parallelism != int(count) { + if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "jobs" || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) { t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count) } } @@ -342,7 +342,7 @@ func TestJobScaleInvalid(t *testing.T) { } func TestJobScaleFailsPreconditions(t *testing.T) { - ten := 10 + ten := int32(10) fake := testclient.NewSimpleFake(&batch.Job{ Spec: batch.JobSpec{ Parallelism: &ten, @@ -364,7 +364,7 @@ func TestJobScaleFailsPreconditions(t *testing.T) { } func TestValidateJob(t *testing.T) { - zero, ten, twenty := 0, 10, 20 + zero, ten, twenty := int32(0), int32(10), int32(20) tests := []struct { preconditions ScalePrecondition job batch.Job @@ -557,7 +557,7 @@ func TestDeploymentScale(t *testing.T) { if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name { t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) } - if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "deployments" || action.GetObject().(*extensions.Deployment).Spec.Replicas != int(count) { + if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "deployments" || action.GetObject().(*extensions.Deployment).Spec.Replicas != int32(count) { t.Errorf("unexpected action %v, expected update-deployment with replicas = %d", actions[1], count) } } @@ -603,7 +603,7 @@ func TestDeploymentScaleFailsPreconditions(t *testing.T) { } func TestValidateDeployment(t *testing.T) { - zero, ten, 
twenty := 0, 10, 20 + zero, ten, twenty := int32(0), int32(10), int32(20) tests := []struct { preconditions ScalePrecondition deployment extensions.Deployment diff --git a/pkg/kubectl/service.go b/pkg/kubectl/service.go index 57249e463a..2ffd72af6f 100644 --- a/pkg/kubectl/service.go +++ b/pkg/kubectl/service.go @@ -136,7 +136,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { } ports = append(ports, api.ServicePort{ Name: name, - Port: port, + Port: int32(port), Protocol: api.Protocol(params["protocol"]), }) } @@ -171,7 +171,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { // should be the same as Port for i := range service.Spec.Ports { port := service.Spec.Ports[i].Port - service.Spec.Ports[i].TargetPort = intstr.FromInt(port) + service.Spec.Ports[i].TargetPort = intstr.FromInt(int(port)) } } if params["create-external-load-balancer"] == "true" { diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go index e7f21f71af..ef8167131a 100644 --- a/pkg/kubectl/stop.go +++ b/pkg/kubectl/stop.go @@ -367,7 +367,7 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) { // set deployment's history and scale to 0 // TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527 - d.Spec.RevisionHistoryLimit = util.IntPtr(0) + d.Spec.RevisionHistoryLimit = util.Int32Ptr(0) d.Spec.Replicas = 0 d.Spec.Paused = true }) diff --git a/pkg/kubectl/stop_test.go b/pkg/kubectl/stop_test.go index 1596b3b970..dfc161051e 100644 --- a/pkg/kubectl/stop_test.go +++ b/pkg/kubectl/stop_test.go @@ -379,7 +379,7 @@ func TestReplicaSetStop(t *testing.T) { func TestJobStop(t *testing.T) { name := "foo" ns := "default" - zero := 0 + zero := int32(0) tests := []struct { Name string Objs []runtime.Object diff --git a/pkg/kubelet/envvars/envvars.go b/pkg/kubelet/envvars/envvars.go index 5a1de0f35c..31e82eb781 100644 --- a/pkg/kubelet/envvars/envvars.go +++ b/pkg/kubelet/envvars/envvars.go @@ -44,13 +44,13 @@ func FromServices(services *api.ServiceList) []api.EnvVar { result = append(result, api.EnvVar{Name: name, Value: service.Spec.ClusterIP}) // First port - give it the backwards-compatible name name = makeEnvVariableName(service.Name) + "_SERVICE_PORT" - result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(service.Spec.Ports[0].Port)}) + result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(int(service.Spec.Ports[0].Port))}) // All named ports (only the first may be unnamed, checked in validation) for i := range service.Spec.Ports { sp := &service.Spec.Ports[i] if sp.Name != "" { pn := name + "_" + makeEnvVariableName(sp.Name) - result = append(result, api.EnvVar{Name: pn, Value: strconv.Itoa(sp.Port)}) + result = append(result, api.EnvVar{Name: pn, Value: strconv.Itoa(int(sp.Port))}) } } // Docker-compatible vars. 
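Two mechanical patterns repeat throughout these hunks and are worth spelling out. First, strconv.Itoa and strconv.Atoi still operate on plain int, so every call site that now touches an int32 API field gains an explicit int(...) or int32(...) conversion. Second, pointer-valued knobs (RevisionHistoryLimit, OOMScoreAdj, Job parallelism) need either a named int32 variable or a tiny *int32 helper, because Go cannot take the address of a conversion expression such as &int32(0). The following is a minimal standalone sketch of both patterns, not part of the patch itself; int32Ptr below is a local stand-in for the util.Int32Ptr helper introduced in pkg/util/util.go further down in this diff, and the port and replica values are illustrative only.

package main

import (
	"fmt"
	"strconv"
)

// int32Ptr mirrors the role of util.Int32Ptr: &int32(0) is not legal Go,
// so a small helper returns a pointer to an int32 value.
func int32Ptr(i int32) *int32 { return &i }

func main() {
	var port int32 = 8080

	// strconv.Itoa takes an int, hence the explicit conversion at each call site.
	addr := "10.0.0.1:" + strconv.Itoa(int(port))
	fmt.Println(addr) // 10.0.0.1:8080

	// Parsing goes the other way: strconv.Atoi yields an int, converted once to int32.
	parsed, err := strconv.Atoi("3")
	if err != nil {
		panic(err)
	}
	replicas := int32(parsed)

	// Pointer-valued fields such as RevisionHistoryLimit become *int32.
	limit := int32Ptr(0)
	fmt.Println(replicas, *limit)
}

The same shape explains the desiredAnnotation/desired split in rolling_updater.go above: Atoi produces an int, which is converted to int32 once rather than at every later use.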
@@ -96,7 +96,7 @@ func makeLinkVariables(service *api.Service) []api.EnvVar { }, { Name: portPrefix + "_PORT", - Value: strconv.Itoa(sp.Port), + Value: strconv.Itoa(int(sp.Port)), }, { Name: portPrefix + "_ADDR", diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 91f8665ce9..8fa61fe642 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -1353,8 +1353,8 @@ func makePortMappings(container *api.Container) (ports []kubecontainer.PortMappi names := make(map[string]struct{}) for _, p := range container.Ports { pm := kubecontainer.PortMapping{ - HostPort: p.HostPort, - ContainerPort: p.ContainerPort, + HostPort: int(p.HostPort), + ContainerPort: int(p.ContainerPort), Protocol: p.Protocol, HostIP: p.HostIP, } @@ -3506,7 +3506,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain cid := cs.ID.String() status := &api.ContainerStatus{ Name: cs.Name, - RestartCount: cs.RestartCount, + RestartCount: int32(cs.RestartCount), Image: cs.Image, ImageID: cs.ImageID, ContainerID: cid, @@ -3516,7 +3516,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)} case kubecontainer.ContainerStateExited: status.State.Terminated = &api.ContainerStateTerminated{ - ExitCode: cs.ExitCode, + ExitCode: int32(cs.ExitCode), Reason: cs.Reason, Message: cs.Message, StartedAt: unversioned.NewTime(cs.StartedAt), diff --git a/pkg/kubelet/lifecycle/handlers.go b/pkg/kubelet/lifecycle/handlers.go index de7a765264..245a183c3e 100644 --- a/pkg/kubelet/lifecycle/handlers.go +++ b/pkg/kubelet/lifecycle/handlers.go @@ -78,7 +78,7 @@ func resolvePort(portReference intstr.IntOrString, container *api.Container) (in } for _, portSpec := range container.Ports { if portSpec.Name == portName { - return portSpec.ContainerPort, nil + return int(portSpec.ContainerPort), nil } } return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container) diff --git a/pkg/kubelet/lifecycle/handlers_test.go b/pkg/kubelet/lifecycle/handlers_test.go index 4797db2f33..1b1968fbfe 100644 --- a/pkg/kubelet/lifecycle/handlers_test.go +++ b/pkg/kubelet/lifecycle/handlers_test.go @@ -43,7 +43,7 @@ func TestResolvePortString(t *testing.T) { name := "foo" container := &api.Container{ Ports: []api.ContainerPort{ - {Name: name, ContainerPort: expected}, + {Name: name, ContainerPort: int32(expected)}, }, } port, err := resolvePort(intstr.FromString(name), container) @@ -56,7 +56,7 @@ func TestResolvePortString(t *testing.T) { } func TestResolvePortStringUnknown(t *testing.T) { - expected := 80 + expected := int32(80) name := "foo" container := &api.Container{ Ports: []api.ContainerPort{ diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index 11d225aaf8..e322b54c40 100644 --- a/pkg/kubelet/prober/prober.go +++ b/pkg/kubelet/prober/prober.go @@ -198,7 +198,7 @@ func extractPort(param intstr.IntOrString, container api.Container) (int, error) func findPortByName(container api.Container, portName string) (int, error) { for _, port := range container.Ports { if port.Name == portName { - return port.ContainerPort, nil + return int(port.ContainerPort), nil } } return 0, fmt.Errorf("port %s not found", portName) diff --git a/pkg/kubelet/prober/worker.go b/pkg/kubelet/prober/worker.go index 5067dd5474..6edd3daa38 100644 --- a/pkg/kubelet/prober/worker.go +++ b/pkg/kubelet/prober/worker.go @@ -188,7 +188,7 @@ func (w *worker) doProbe() (keepGoing bool) { 
w.pod.Spec.RestartPolicy != api.RestartPolicyNever } - if int(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds { + if int32(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds { return true } @@ -205,8 +205,8 @@ func (w *worker) doProbe() (keepGoing bool) { w.resultRun = 1 } - if (result == results.Failure && w.resultRun < w.spec.FailureThreshold) || - (result == results.Success && w.resultRun < w.spec.SuccessThreshold) { + if (result == results.Failure && w.resultRun < int(w.spec.FailureThreshold)) || + (result == results.Success && w.resultRun < int(w.spec.SuccessThreshold)) { // Success or failure is below threshold - leave the probe state unchanged. return true } diff --git a/pkg/kubemark/hollow_proxy.go b/pkg/kubemark/hollow_proxy.go index 7a3920eefb..2cff3473f4 100644 --- a/pkg/kubemark/hollow_proxy.go +++ b/pkg/kubemark/hollow_proxy.go @@ -60,7 +60,7 @@ func NewHollowProxyOrDie( ) *HollowProxy { // Create and start Hollow Proxy config := options.NewProxyConfig() - config.OOMScoreAdj = util.IntPtr(0) + config.OOMScoreAdj = util.Int32Ptr(0) config.ResourceContainer = "" config.NodeRef = &api.ObjectReference{ Kind: "Node", diff --git a/pkg/master/controller.go b/pkg/master/controller.go index ca40a68391..d74f5a8bc5 100644 --- a/pkg/master/controller.go +++ b/pkg/master/controller.go @@ -158,12 +158,12 @@ func createPortAndServiceSpec(servicePort int, nodePort int, servicePortName str //Use the Cluster IP type for the service port if NodePort isn't provided. //Otherwise, we will be binding the master service to a NodePort. servicePorts := []api.ServicePort{{Protocol: api.ProtocolTCP, - Port: servicePort, + Port: int32(servicePort), Name: servicePortName, TargetPort: intstr.FromInt(servicePort)}} serviceType := api.ServiceTypeClusterIP if nodePort > 0 { - servicePorts[0].NodePort = nodePort + servicePorts[0].NodePort = int32(nodePort) serviceType = api.ServiceTypeNodePort } if extraServicePorts != nil { @@ -175,7 +175,7 @@ func createPortAndServiceSpec(servicePort int, nodePort int, servicePortName str // createEndpointPortSpec creates an array of endpoint ports func createEndpointPortSpec(endpointPort int, endpointPortName string, extraEndpointPorts []api.EndpointPort) []api.EndpointPort { endpointPorts := []api.EndpointPort{{Protocol: api.ProtocolTCP, - Port: endpointPort, + Port: int32(endpointPort), Name: endpointPortName, }} if extraEndpointPorts != nil { diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index 36ace75c3e..17f735fd7b 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -285,8 +285,8 @@ func TestControllerServicePorts(t *testing.T) { controller := master.NewBootstrapController() - assert.Equal(1000, controller.ExtraServicePorts[0].Port) - assert.Equal(1010, controller.ExtraServicePorts[1].Port) + assert.Equal(int32(1000), controller.ExtraServicePorts[0].Port) + assert.Equal(int32(1010), controller.ExtraServicePorts[1].Port) } // TestGetNodeAddresses verifies that proper results are returned diff --git a/pkg/metrics/metrics_grabber.go b/pkg/metrics/metrics_grabber.go index 6058d71b95..0f02a7860f 100644 --- a/pkg/metrics/metrics_grabber.go +++ b/pkg/metrics/metrics_grabber.go @@ -94,14 +94,14 @@ func (g *MetricsGrabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items) } kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port - return 
g.grabFromKubeletInternal(nodeName, kubeletPort) + return g.grabFromKubeletInternal(nodeName, int(kubeletPort)) } func (g *MetricsGrabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (KubeletMetrics, error) { if kubeletPort <= 0 || kubeletPort > 65535 { return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering.", kubeletPort) } - output, err := g.getMetricsFromNode(nodeName, kubeletPort) + output, err := g.getMetricsFromNode(nodeName, int(kubeletPort)) if err != nil { return KubeletMetrics{}, err } @@ -173,7 +173,7 @@ func (g *MetricsGrabber) Grab(unknownMetrics sets.String) (MetricsCollection, er } else { for _, node := range nodes.Items { kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port - metrics, err := g.grabFromKubeletInternal(node.Name, kubeletPort) + metrics, err := g.grabFromKubeletInternal(node.Name, int(kubeletPort)) if err != nil { errs = append(errs, err) } diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 5c7bd23008..4cdd29ee61 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -330,7 +330,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { } func (proxier *Proxier) sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool { - if info.protocol != port.Protocol || info.port != port.Port || info.nodePort != port.NodePort { + if info.protocol != port.Protocol || info.port != int(port.Port) || info.nodePort != int(port.NodePort) { return false } if !info.clusterIP.Equal(net.ParseIP(service.Spec.ClusterIP)) { @@ -426,9 +426,9 @@ func (proxier *Proxier) OnServiceUpdate(allServices []api.Service) { glog.V(1).Infof("Adding new service %q at %s:%d/%s", serviceName, serviceIP, servicePort.Port, servicePort.Protocol) info = newServiceInfo(serviceName) info.clusterIP = serviceIP - info.port = servicePort.Port + info.port = int(servicePort.Port) info.protocol = servicePort.Protocol - info.nodePort = servicePort.NodePort + info.nodePort = int(servicePort.NodePort) info.externalIPs = service.Spec.ExternalIPs // Deep-copy in case the service instance changes info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer) @@ -483,7 +483,7 @@ func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []api.Endpoints) { port := &ss.Ports[i] for i := range ss.Addresses { addr := &ss.Addresses[i] - portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, port.Port}) + portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)}) } } } diff --git a/pkg/proxy/userspace/proxier.go b/pkg/proxy/userspace/proxier.go index 76ba794811..0ee5185943 100644 --- a/pkg/proxy/userspace/proxier.go +++ b/pkg/proxy/userspace/proxier.go @@ -419,11 +419,11 @@ func (proxier *Proxier) OnServiceUpdate(services []api.Service) { continue } info.portal.ip = serviceIP - info.portal.port = servicePort.Port + info.portal.port = int(servicePort.Port) info.externalIPs = service.Spec.ExternalIPs // Deep-copy in case the service instance changes info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer) - info.nodePort = servicePort.NodePort + info.nodePort = int(servicePort.NodePort) info.sessionAffinityType = service.Spec.SessionAffinity glog.V(4).Infof("info: %+v", info) @@ -452,7 +452,7 @@ func (proxier *Proxier) OnServiceUpdate(services []api.Service) { } func sameConfig(info *serviceInfo, service *api.Service, port 
*api.ServicePort) bool { - if info.protocol != port.Protocol || info.portal.port != port.Port || info.nodePort != port.NodePort { + if info.protocol != port.Protocol || info.portal.port != int(port.Port) || info.nodePort != int(port.NodePort) { return false } if !info.portal.ip.Equal(net.ParseIP(service.Spec.ClusterIP)) { diff --git a/pkg/proxy/userspace/proxier_test.go b/pkg/proxy/userspace/proxier_test.go index 34b01a9fc4..2ba5d398bc 100644 --- a/pkg/proxy/userspace/proxier_test.go +++ b/pkg/proxy/userspace/proxier_test.go @@ -82,8 +82,8 @@ func waitForClosedPortUDP(p *Proxier, proxyPort int) error { return fmt.Errorf("port %d still open", proxyPort) } -var tcpServerPort int -var udpServerPort int +var tcpServerPort int32 +var udpServerPort int32 func init() { // Don't handle panics @@ -103,10 +103,11 @@ func init() { if err != nil { panic(fmt.Sprintf("failed to parse: %v", err)) } - tcpServerPort, err = strconv.Atoi(port) + tcpServerPortValue, err := strconv.Atoi(port) if err != nil { panic(fmt.Sprintf("failed to atoi(%s): %v", port, err)) } + tcpServerPort = int32(tcpServerPortValue) // UDP setup. udp, err := newUDPEchoServer() @@ -117,10 +118,11 @@ func init() { if err != nil { panic(fmt.Sprintf("failed to parse: %v", err)) } - udpServerPort, err = strconv.Atoi(port) + udpServerPortValue, err := strconv.Atoi(port) if err != nil { panic(fmt.Sprintf("failed to atoi(%s): %v", port, err)) } + udpServerPort = int32(udpServerPortValue) go udp.Loop() } @@ -564,7 +566,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{ Name: "p", - Port: svcInfo.proxyPort, + Port: int32(svcInfo.proxyPort), Protocol: "TCP", }}}, }}) @@ -616,7 +618,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{ Name: "p", - Port: svcInfo.proxyPort, + Port: int32(svcInfo.proxyPort), Protocol: "UDP", }}}, }}) @@ -752,7 +754,7 @@ func TestProxyUpdatePublicIPs(t *testing.T) { Spec: api.ServiceSpec{ Ports: []api.ServicePort{{ Name: "p", - Port: svcInfo.portal.port, + Port: int32(svcInfo.portal.port), Protocol: "TCP", }}, ClusterIP: svcInfo.portal.ip.String(), @@ -803,7 +805,7 @@ func TestProxyUpdatePortal(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{ Name: "p", - Port: svcInfo.proxyPort, + Port: int32(svcInfo.proxyPort), Protocol: "TCP", }}}, }}) @@ -816,7 +818,7 @@ func TestProxyUpdatePortal(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{ Name: "p", - Port: svcInfo.proxyPort, + Port: int32(svcInfo.proxyPort), Protocol: "TCP", }}}, }}) @@ -829,7 +831,7 @@ func TestProxyUpdatePortal(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{ Name: "p", - Port: svcInfo.proxyPort, + Port: int32(svcInfo.proxyPort), Protocol: "TCP", }}}, }}) diff --git a/pkg/proxy/userspace/roundrobin.go b/pkg/proxy/userspace/roundrobin.go index 6c28fc016a..86a93e2561 100644 --- a/pkg/proxy/userspace/roundrobin.go +++ b/pkg/proxy/userspace/roundrobin.go @@ -244,7 +244,7 @@ func (lb *LoadBalancerRR) 
OnEndpointsUpdate(allEndpoints []api.Endpoints) { port := &ss.Ports[i] for i := range ss.Addresses { addr := &ss.Addresses[i] - portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, port.Port}) + portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)}) // Ignore the protocol field - we'll get that from the Service objects. } } diff --git a/pkg/registry/controller/etcd/etcd_test.go b/pkg/registry/controller/etcd/etcd_test.go index ec6079c647..9a5e086c42 100644 --- a/pkg/registry/controller/etcd/etcd_test.go +++ b/pkg/registry/controller/etcd/etcd_test.go @@ -281,7 +281,7 @@ func TestScaleUpdate(t *testing.T) { if err != nil { t.Fatalf("error setting new replication controller %v: %v", *validController, err) } - replicas := 12 + replicas := int32(12) update := autoscaling.Scale{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace}, Spec: autoscaling.ScaleSpec{ diff --git a/pkg/registry/controller/strategy.go b/pkg/registry/controller/strategy.go index f2c19391b4..9fa5bebdac 100644 --- a/pkg/registry/controller/strategy.go +++ b/pkg/registry/controller/strategy.go @@ -105,7 +105,7 @@ func (rcStrategy) AllowUnconditionalUpdate() bool { func ControllerToSelectableFields(controller *api.ReplicationController) fields.Set { objectMetaFieldsSet := generic.ObjectMetaFieldsSet(controller.ObjectMeta, true) controllerSpecificFieldsSet := fields.Set{ - "status.replicas": strconv.Itoa(controller.Status.Replicas), + "status.replicas": strconv.Itoa(int(controller.Status.Replicas)), } return generic.MergeFieldsSets(objectMetaFieldsSet, controllerSpecificFieldsSet) } diff --git a/pkg/registry/deployment/etcd/etcd_test.go b/pkg/registry/deployment/etcd/etcd_test.go index 43ee6ffb5a..67d9b6fc5f 100644 --- a/pkg/registry/deployment/etcd/etcd_test.go +++ b/pkg/registry/deployment/etcd/etcd_test.go @@ -225,7 +225,7 @@ func TestScaleUpdate(t *testing.T) { if err := storage.Deployment.Storage.Create(ctx, key, &validDeployment, &deployment, 0); err != nil { t.Fatalf("error setting new deployment (key: %s) %v: %v", key, validDeployment, err) } - replicas := 12 + replicas := int32(12) update := extensions.Scale{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace}, Spec: extensions.ScaleSpec{ diff --git a/pkg/registry/experimental/controller/etcd/etcd_test.go b/pkg/registry/experimental/controller/etcd/etcd_test.go index e1686b453c..5119d88cbc 100644 --- a/pkg/registry/experimental/controller/etcd/etcd_test.go +++ b/pkg/registry/experimental/controller/etcd/etcd_test.go @@ -54,7 +54,7 @@ var validPodTemplate = api.PodTemplate{ }, } -var validReplicas = 8 +var validReplicas = int32(8) var validControllerSpec = api.ReplicationControllerSpec{ Replicas: validReplicas, @@ -108,7 +108,7 @@ func TestUpdate(t *testing.T) { if err := si.Create(ctx, key, &validController, nil, 0); err != nil { t.Fatalf("unexpected error: %v", err) } - replicas := 12 + replicas := int32(12) update := extensions.Scale{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"}, Spec: extensions.ScaleSpec{ diff --git a/pkg/registry/job/etcd/etcd_test.go b/pkg/registry/job/etcd/etcd_test.go index 40804c079f..1e1ba5a26a 100644 --- a/pkg/registry/job/etcd/etcd_test.go +++ b/pkg/registry/job/etcd/etcd_test.go @@ -39,8 +39,8 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) } func validNewJob() *batch.Job { - completions := 1 - parallelism := 1 + completions := int32(1) + parallelism := int32(1) return &batch.Job{ 
ObjectMeta: api.ObjectMeta{ Name: "foo", @@ -97,7 +97,7 @@ func TestUpdate(t *testing.T) { storage, _, server := newStorage(t) defer server.Terminate(t) test := registrytest.New(t, storage.Store) - two := 2 + two := int32(2) test.TestUpdate( // valid validNewJob(), diff --git a/pkg/registry/job/strategy.go b/pkg/registry/job/strategy.go index 2a445bf652..3c362326e7 100644 --- a/pkg/registry/job/strategy.go +++ b/pkg/registry/job/strategy.go @@ -158,7 +158,7 @@ func (jobStatusStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object func JobToSelectableFields(job *batch.Job) fields.Set { objectMetaFieldsSet := generic.ObjectMetaFieldsSet(job.ObjectMeta, true) specificFieldsSet := fields.Set{ - "status.successful": strconv.Itoa(job.Status.Succeeded), + "status.successful": strconv.Itoa(int(job.Status.Succeeded)), } return generic.MergeFieldsSets(objectMetaFieldsSet, specificFieldsSet) } diff --git a/pkg/registry/job/strategy_test.go b/pkg/registry/job/strategy_test.go index c42cc319bc..d90ea525ba 100644 --- a/pkg/registry/job/strategy_test.go +++ b/pkg/registry/job/strategy_test.go @@ -80,7 +80,7 @@ func TestJobStrategy(t *testing.T) { if len(errs) != 0 { t.Errorf("Unexpected error validating %v", errs) } - parallelism := 10 + parallelism := int32(10) updatedJob := &batch.Job{ ObjectMeta: api.ObjectMeta{Name: "bar", ResourceVersion: "4"}, Spec: batch.JobSpec{ @@ -173,8 +173,8 @@ func TestJobStatusStrategy(t *testing.T) { Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, }, } - oldParallelism := 10 - newParallelism := 11 + oldParallelism := int32(10) + newParallelism := int32(11) oldJob := &batch.Job{ ObjectMeta: api.ObjectMeta{ Name: "myjob", diff --git a/pkg/registry/node/etcd/etcd.go b/pkg/registry/node/etcd/etcd.go index 85e09adee9..ae7ba107d1 100644 --- a/pkg/registry/node/etcd/etcd.go +++ b/pkg/registry/node/etcd/etcd.go @@ -123,7 +123,7 @@ func (r *REST) getKubeletPort(ctx api.Context, nodeName string) (int, error) { if !ok { return 0, fmt.Errorf("Unexpected object type: %#v", node) } - return node.Status.DaemonEndpoints.KubeletEndpoint.Port, nil + return int(node.Status.DaemonEndpoints.KubeletEndpoint.Port), nil } func (c *REST) GetConnectionInfo(ctx api.Context, nodeName string) (string, uint, http.RoundTripper, error) { diff --git a/pkg/registry/node/strategy.go b/pkg/registry/node/strategy.go index fc00988ba7..9085090b8c 100644 --- a/pkg/registry/node/strategy.go +++ b/pkg/registry/node/strategy.go @@ -186,7 +186,7 @@ func ResourceLocation(getter ResourceGetter, connection client.ConnectionInfoGet if kubeletPort == 0 { kubeletPort = ports.KubeletPort } - if portReq == "" || strconv.Itoa(kubeletPort) == portReq { + if portReq == "" || strconv.Itoa(int(kubeletPort)) == portReq { scheme, port, kubeletTransport, err := connection.GetConnectionInfo(ctx, node.Name) if err != nil { return nil, nil, err diff --git a/pkg/registry/replicaset/etcd/etcd_test.go b/pkg/registry/replicaset/etcd/etcd_test.go index e56a077612..30eaa6e5be 100644 --- a/pkg/registry/replicaset/etcd/etcd_test.go +++ b/pkg/registry/replicaset/etcd/etcd_test.go @@ -295,7 +295,7 @@ func TestScaleUpdate(t *testing.T) { Namespace: api.NamespaceDefault, }, Spec: extensions.ScaleSpec{ - Replicas: replicas, + Replicas: int32(replicas), }, } @@ -308,7 +308,7 @@ func TestScaleUpdate(t *testing.T) { t.Fatalf("error fetching scale for %s: %v", name, err) } scale := obj.(*extensions.Scale) - if scale.Spec.Replicas != replicas { + if scale.Spec.Replicas != int32(replicas) { 
t.Errorf("wrong replicas count expected: %d got: %d", replicas, scale.Spec.Replicas) } diff --git a/pkg/registry/replicaset/strategy.go b/pkg/registry/replicaset/strategy.go index b7f7f54f7f..7d7b38a0d0 100644 --- a/pkg/registry/replicaset/strategy.go +++ b/pkg/registry/replicaset/strategy.go @@ -106,7 +106,7 @@ func (rsStrategy) AllowUnconditionalUpdate() bool { func ReplicaSetToSelectableFields(rs *extensions.ReplicaSet) fields.Set { objectMetaFieldsSet := generic.ObjectMetaFieldsSet(rs.ObjectMeta, true) rsSpecificFieldsSet := fields.Set{ - "status.replicas": strconv.Itoa(rs.Status.Replicas), + "status.replicas": strconv.Itoa(int(rs.Status.Replicas)), } return generic.MergeFieldsSets(objectMetaFieldsSet, rsSpecificFieldsSet) } diff --git a/pkg/registry/service/rest.go b/pkg/registry/service/rest.go index af6a591a16..40d7e373b0 100644 --- a/pkg/registry/service/rest.go +++ b/pkg/registry/service/rest.go @@ -115,7 +115,7 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err for i := range service.Spec.Ports { servicePort := &service.Spec.Ports[i] if servicePort.NodePort != 0 { - err := nodePortOp.Allocate(servicePort.NodePort) + err := nodePortOp.Allocate(int(servicePort.NodePort)) if err != nil { // TODO: when validation becomes versioned, this gets more complicated. el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), servicePort.NodePort, err.Error())} @@ -129,7 +129,7 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err // not really an internal error. return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err)) } - servicePort.NodePort = nodePort + servicePort.NodePort = int32(nodePort) } } @@ -240,7 +240,7 @@ func (rs *REST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, boo if assignNodePorts { for i := range service.Spec.Ports { servicePort := &service.Spec.Ports[i] - nodePort := servicePort.NodePort + nodePort := int(servicePort.NodePort) if nodePort != 0 { if !contains(oldNodePorts, nodePort) { err := nodePortOp.Allocate(nodePort) @@ -257,7 +257,7 @@ func (rs *REST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, boo // not really an internal error. return nil, false, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err)) } - servicePort.NodePort = nodePort + servicePort.NodePort = int32(nodePort) } // Detect duplicate node ports; this should have been caught by validation, so we panic if contains(newNodePorts, nodePort) { @@ -316,7 +316,7 @@ func (rs *REST) ResourceLocation(ctx api.Context, id string) (*url.URL, http.Rou } found := false for _, svcPort := range svc.Spec.Ports { - if svcPort.Port == int(portNum) { + if int64(svcPort.Port) == portNum { // use the declared port's name portStr = svcPort.Name found = true @@ -347,7 +347,7 @@ func (rs *REST) ResourceLocation(ctx api.Context, id string) (*url.URL, http.Rou if ss.Ports[i].Name == portStr { // Pick a random address. 
ip := ss.Addresses[rand.Intn(len(ss.Addresses))].IP - port := ss.Ports[i].Port + port := int(ss.Ports[i].Port) return &url.URL{ Scheme: svcScheme, Host: net.JoinHostPort(ip, strconv.Itoa(port)), @@ -374,7 +374,7 @@ func CollectServiceNodePorts(service *api.Service) []int { for i := range service.Spec.Ports { servicePort := &service.Spec.Ports[i] if servicePort.NodePort != 0 { - servicePorts = append(servicePorts, servicePort.NodePort) + servicePorts = append(servicePorts, int(servicePort.NodePort)) } } return servicePorts diff --git a/pkg/storage/etcd/etcd_watcher_test.go b/pkg/storage/etcd/etcd_watcher_test.go index 01423e9972..386f59b56a 100644 --- a/pkg/storage/etcd/etcd_watcher_test.go +++ b/pkg/storage/etcd/etcd_watcher_test.go @@ -270,7 +270,7 @@ func emptySubsets() []api.EndpointSubset { func makeSubsets(ip string, port int) []api.EndpointSubset { return []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{IP: ip}}, - Ports: []api.EndpointPort{{Port: port}}, + Ports: []api.EndpointPort{{Port: int32(port)}}, }} } diff --git a/pkg/util/deployment/deployment.go b/pkg/util/deployment/deployment.go index 6160f5d138..baf5d15b76 100644 --- a/pkg/util/deployment/deployment.go +++ b/pkg/util/deployment/deployment.go @@ -237,8 +237,8 @@ func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.P } // Returns the sum of Replicas of the given replica sets. -func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int { - totalReplicaCount := 0 +func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { + totalReplicaCount := int32(0) for _, rs := range replicaSets { if rs != nil { totalReplicaCount += rs.Spec.Replicas @@ -248,8 +248,8 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int { } // GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets. -func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int { - totalReplicaCount := 0 +func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { + totalReplicaCount := int32(0) for _, rs := range replicaSets { if rs != nil { totalReplicaCount += rs.Status.Replicas @@ -259,7 +259,7 @@ func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) i } // Returns the number of available pods corresponding to the given replica sets. 
-func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int) (int, error) { +func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int32) (int32, error) { allPods, err := GetPodsForReplicaSets(c, rss) if err != nil { return 0, err @@ -267,8 +267,8 @@ func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.Rep return getReadyPodsCount(allPods, minReadySeconds), nil } -func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int { - readyPodCount := 0 +func getReadyPodsCount(pods []api.Pod, minReadySeconds int32) int32 { + readyPodCount := int32(0) for _, pod := range pods { if IsPodAvailable(&pod, minReadySeconds) { readyPodCount++ @@ -277,7 +277,7 @@ func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int { return readyPodCount } -func IsPodAvailable(pod *api.Pod, minReadySeconds int) bool { +func IsPodAvailable(pod *api.Pod, minReadySeconds int32) bool { if !controller.IsPodActive(*pod) { return false } @@ -340,17 +340,17 @@ func IsRollingUpdate(deployment *extensions.Deployment) bool { // When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it. // 1) The new RS is saturated: newRS's replicas == deployment's replicas // 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas -func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int, error) { +func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) { switch deployment.Spec.Strategy.Type { case extensions.RollingUpdateDeploymentStrategyType: // Check if we can scale up. - maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Replicas, true) + maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(deployment.Spec.Replicas), true) if err != nil { return 0, err } // Find the total number of pods currentPodCount := GetReplicaCountForReplicaSets(allRSs) - maxTotalPods := deployment.Spec.Replicas + maxSurge + maxTotalPods := deployment.Spec.Replicas + int32(maxSurge) if currentPodCount >= maxTotalPods { // Cannot scale up. return newRS.Spec.Replicas, nil @@ -358,7 +358,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re // Scale up. scaleUpCount := maxTotalPods - currentPodCount // Do not exceed the number of desired replicas. 
- scaleUpCount = integer.IntMin(scaleUpCount, deployment.Spec.Replicas-newRS.Spec.Replicas) + scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(deployment.Spec.Replicas-newRS.Spec.Replicas))) return newRS.Spec.Replicas + scaleUpCount, nil case extensions.RecreateDeploymentStrategyType: return deployment.Spec.Replicas, nil @@ -389,12 +389,12 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, // 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) // 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) // 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) -func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int) (int, int, error) { - surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, desired, true) +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true) if err != nil { return 0, 0, err } - unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, desired, false) + unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false) if err != nil { return 0, 0, err } @@ -407,5 +407,5 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired unavailable = 1 } - return surge, unavailable, nil + return int32(surge), int32(unavailable), nil } diff --git a/pkg/util/deployment/deployment_test.go b/pkg/util/deployment/deployment_test.go index ce9b0cb9e1..31d23d2c18 100644 --- a/pkg/util/deployment/deployment_test.go +++ b/pkg/util/deployment/deployment_test.go @@ -124,7 +124,7 @@ func TestGetReadyPodsCount(t *testing.T) { } for _, test := range tests { - if count := getReadyPodsCount(test.pods, test.minReadySeconds); count != test.expected { + if count := getReadyPodsCount(test.pods, int32(test.minReadySeconds)); int(count) != test.expected { t.Errorf("Pods = %#v, minReadySeconds = %d, expected %d, got %d", test.pods, test.minReadySeconds, test.expected, count) } } diff --git a/pkg/util/util.go b/pkg/util/util.go index 29284b2060..4826a448b1 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -122,6 +122,12 @@ func IntPtr(i int) *int { return &o } +// Int32Ptr returns a pointer to an int32 +func Int32Ptr(i int32) *int32 { + o := i + return &o +} + // IntPtrDerefOr dereference the int ptr and returns it i not nil, // else returns def. func IntPtrDerefOr(ptr *int, def int) int { @@ -130,3 +136,12 @@ func IntPtrDerefOr(ptr *int, def int) int { } return def } + +// Int32PtrDerefOr dereferences the int32 ptr and returns it if not nil, +// else returns def.
+func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} diff --git a/pkg/volume/aws_ebs/aws_ebs.go b/pkg/volume/aws_ebs/aws_ebs.go index 35e9419c68..dfbdae8053 100644 --- a/pkg/volume/aws_ebs/aws_ebs.go +++ b/pkg/volume/aws_ebs/aws_ebs.go @@ -94,7 +94,7 @@ func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, fsType := ebs.FSType partition := "" if ebs.Partition != 0 { - partition = strconv.Itoa(ebs.Partition) + partition = strconv.Itoa(int(ebs.Partition)) } return &awsElasticBlockStoreMounter{ diff --git a/pkg/volume/fc/fc.go b/pkg/volume/fc/fc.go index a84a8c06ae..9410c0b6ee 100644 --- a/pkg/volume/fc/fc.go +++ b/pkg/volume/fc/fc.go @@ -92,7 +92,7 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, return nil, fmt.Errorf("empty lun") } - lun := strconv.Itoa(*fc.Lun) + lun := strconv.Itoa(int(*fc.Lun)) return &fcDiskMounter{ fcDisk: &fcDisk{ diff --git a/pkg/volume/fc/fc_test.go b/pkg/volume/fc/fc_test.go index 91e9c89b9d..ed281313c7 100644 --- a/pkg/volume/fc/fc_test.go +++ b/pkg/volume/fc/fc_test.go @@ -199,7 +199,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) { } func TestPluginVolume(t *testing.T) { - lun := 0 + lun := int32(0) vol := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ @@ -214,7 +214,7 @@ func TestPluginVolume(t *testing.T) { } func TestPluginPersistentVolume(t *testing.T) { - lun := 0 + lun := int32(0) vol := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "vol1", @@ -239,7 +239,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) { } defer os.RemoveAll(tmpDir) - lun := 0 + lun := int32(0) pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", diff --git a/pkg/volume/gce_pd/gce_pd.go b/pkg/volume/gce_pd/gce_pd.go index 42be86dcae..006ed57349 100644 --- a/pkg/volume/gce_pd/gce_pd.go +++ b/pkg/volume/gce_pd/gce_pd.go @@ -94,7 +94,7 @@ func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, pod fsType := gce.FSType partition := "" if gce.Partition != 0 { - partition = strconv.Itoa(gce.Partition) + partition = strconv.Itoa(int(gce.Partition)) } return &gcePersistentDiskMounter{ diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index a778bab1b6..d36a43abcb 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -89,7 +89,7 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI readOnly = spec.ReadOnly } - lun := strconv.Itoa(iscsi.Lun) + lun := strconv.Itoa(int(iscsi.Lun)) portal := portalMounter(iscsi.TargetPortal) iface := iscsi.ISCSIInterface diff --git a/plugin/cmd/kube-scheduler/app/options/options.go b/plugin/cmd/kube-scheduler/app/options/options.go index 69cd54945c..89f9d79b6a 100644 --- a/plugin/cmd/kube-scheduler/app/options/options.go +++ b/plugin/cmd/kube-scheduler/app/options/options.go @@ -50,7 +50,7 @@ func NewSchedulerServer() *SchedulerServer { // AddFlags adds flags for a specific SchedulerServer to the specified FlagSet func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) { - fs.IntVar(&s.Port, "port", s.Port, "The port that the scheduler's http service runs on") + fs.Int32Var(&s.Port, "port", s.Port, "The port that the scheduler's http service runs on") fs.StringVar(&s.Address, "address", s.Address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)") fs.StringVar(&s.AlgorithmProvider, "algorithm-provider", s.AlgorithmProvider, "The scheduling algorithm provider to use, one of: 
"+factory.ListAlgorithmProviders()) fs.StringVar(&s.PolicyConfigFile, "policy-config-file", s.PolicyConfigFile, "File with scheduler policy configuration") @@ -60,12 +60,12 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) { var unusedBindPodsQPS float32 fs.Float32Var(&unusedBindPodsQPS, "bind-pods-qps", 0, "unused, use --kube-api-qps") fs.MarkDeprecated("bind-pods-qps", "flag is unused and will be removed. Use kube-api-qps instead.") - var unusedBindPodsBurst int - fs.IntVar(&unusedBindPodsBurst, "bind-pods-burst", 0, "unused, use --kube-api-burst") + var unusedBindPodsBurst int32 + fs.Int32Var(&unusedBindPodsBurst, "bind-pods-burst", 0, "unused, use --kube-api-burst") fs.MarkDeprecated("bind-pods-burst", "flag is unused and will be removed. Use kube-api-burst instead.") fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.") fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver") - fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") + fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'") leaderelection.BindFlags(&s.LeaderElection, fs) } diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index 6fe2eb176f..c4f788397e 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -82,7 +82,7 @@ func Run(s *options.SchedulerServer) error { kubeconfig.ContentType = s.ContentType // Override kubeconfig qps/burst settings from flags kubeconfig.QPS = s.KubeAPIQPS - kubeconfig.Burst = s.KubeAPIBurst + kubeconfig.Burst = int(s.KubeAPIBurst) kubeClient, err := client.New(kubeconfig) if err != nil { @@ -101,7 +101,7 @@ func Run(s *options.SchedulerServer) error { mux.Handle("/metrics", prometheus.Handler()) server := &http.Server{ - Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)), + Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))), Handler: mux, } glog.Fatal(server.ListenAndServe()) diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index a67b161525..229761bfc0 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -683,7 +683,7 @@ func getUsedPorts(pods ...*api.Pod) map[int]bool { // "0" is explicitly ignored in PodFitsHostPorts, // which is the only function that uses this value. 
if podPort.HostPort != 0 { - ports[podPort.HostPort] = true + ports[int(podPort.HostPort)] = true } } } diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go index c0dc7913fa..73f71dc3d7 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -267,7 +267,7 @@ func TestPodFitsHost(t *testing.T) { func newPod(host string, hostPorts ...int) *api.Pod { networkPorts := []api.ContainerPort{} for _, port := range hostPorts { - networkPorts = append(networkPorts, api.ContainerPort{HostPort: port}) + networkPorts = append(networkPorts, api.ContainerPort{HostPort: int32(port)}) } return &api.Pod{ Spec: api.PodSpec{ @@ -1487,7 +1487,7 @@ func TestPredicatesRegistered(t *testing.T) { func newPodWithPort(hostPorts ...int) *api.Pod { networkPorts := []api.ContainerPort{} for _, port := range hostPorts { - networkPorts = append(networkPorts, api.ContainerPort{HostPort: port}) + networkPorts = append(networkPorts, api.ContainerPort{HostPort: int32(port)}) } return &api.Pod{ Spec: api.PodSpec{ diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go index d285096aab..e511c40021 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go @@ -72,7 +72,7 @@ func (s *NodeAffinity) CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInf for _, node := range nodes.Items { if nodeSelector.Matches(labels.Set(node.Labels)) { - counts[node.Name] += preferredSchedulingTerm.Weight + counts[node.Name] += int(preferredSchedulingTerm.Weight) } if counts[node.Name] > maxCount { diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go index 01843e16e5..deac028340 100644 --- a/plugin/pkg/scheduler/scheduler_test.go +++ b/plugin/pkg/scheduler/scheduler_test.go @@ -55,7 +55,7 @@ func podWithID(id, desiredHost string) *api.Pod { func podWithPort(id, desiredHost string, port int) *api.Pod { pod := podWithID(id, desiredHost) pod.Spec.Containers = []api.Container{ - {Name: "ctr", Ports: []api.ContainerPort{{HostPort: port}}}, + {Name: "ctr", Ports: []api.ContainerPort{{HostPort: int32(port)}}}, } return pod } diff --git a/test/component/scheduler/perf/util.go b/test/component/scheduler/perf/util.go index 484086d9e2..a1ffda127f 100644 --- a/test/component/scheduler/perf/util.go +++ b/test/component/scheduler/perf/util.go @@ -137,7 +137,7 @@ func makePodsFromRC(c client.Interface, name string, podCount int) { Name: name, }, Spec: api.ReplicationControllerSpec{ - Replicas: podCount, + Replicas: int32(podCount), Selector: map[string]string{"name": name}, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go index a5f98f918d..af30b01be5 100644 --- a/test/e2e/autoscaling_utils.go +++ b/test/e2e/autoscaling_utils.go @@ -292,21 +292,21 @@ func (rc *ResourceConsumer) GetReplicas() int { if replicationController == nil { framework.Failf(rcIsNil) } - return replicationController.Status.Replicas + return int(replicationController.Status.Replicas) case kindDeployment: deployment, err := rc.framework.Client.Deployments(rc.framework.Namespace.Name).Get(rc.name) framework.ExpectNoError(err) if deployment == nil { framework.Failf(deploymentIsNil) } - return deployment.Status.Replicas + return 
int(deployment.Status.Replicas) case kindReplicaSet: rs, err := rc.framework.Client.ReplicaSets(rc.framework.Namespace.Name).Get(rc.name) framework.ExpectNoError(err) if rs == nil { framework.Failf(rsIsNil) } - return rs.Status.Replicas + return int(rs.Status.Replicas) default: framework.Failf(invalidKind) } diff --git a/test/e2e/batch_v1_jobs.go b/test/e2e/batch_v1_jobs.go index a28be96e8b..5a907cf4e8 100644 --- a/test/e2e/batch_v1_jobs.go +++ b/test/e2e/batch_v1_jobs.go @@ -46,9 +46,9 @@ const ( var _ = framework.KubeDescribe("V1Job", func() { f := framework.NewDefaultFramework("v1job") - parallelism := 2 - completions := 4 - lotsOfFailures := 5 // more than completions + parallelism := int32(2) + completions := int32(4) + lotsOfFailures := int32(5) // more than completions // Simplest case: all pods succeed promptly It("should run a job to completion when tasks succeed", func() { @@ -116,8 +116,8 @@ var _ = framework.KubeDescribe("V1Job", func() { }) It("should scale a job up", func() { - startParallelism := 1 - endParallelism := 2 + startParallelism := int32(1) + endParallelism := int32(2) By("Creating a job") job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) job, err := createV1Job(f.Client, f.Namespace.Name, job) @@ -141,8 +141,8 @@ var _ = framework.KubeDescribe("V1Job", func() { }) It("should scale a job down", func() { - startParallelism := 2 - endParallelism := 1 + startParallelism := int32(2) + endParallelism := int32(1) By("Creating a job") job := newTestV1Job("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions) job, err := createV1Job(f.Client, f.Namespace.Name, job) @@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("V1Job", func() { }) // newTestV1Job returns a job which does one of several testing behaviors. -func newTestV1Job(behavior, name string, rPol api.RestartPolicy, parallelism, completions int) *batch.Job { +func newTestV1Job(behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) *batch.Job { job := &batch.Job{ ObjectMeta: api.ObjectMeta{ Name: name, @@ -273,7 +273,7 @@ func deleteV1Job(c *client.Client, ns, name string) error { } // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy. -func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism int) error { +func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism int32) error { label := labels.SelectorFromSet(labels.Set(map[string]string{v1JobSelectorKey: jobName})) return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) { options := api.ListOptions{LabelSelector: label} @@ -281,7 +281,7 @@ func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism i if err != nil { return false, err } - count := 0 + count := int32(0) for _, p := range pods.Items { if p.Status.Phase == api.PodRunning { count++ @@ -292,7 +292,7 @@ func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism i } // Wait for job to reach completions. 
-func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int) error { +func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int32) error { return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) { curr, err := c.Batch().Jobs(ns).Get(jobName) if err != nil { diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go index 8024d66e6f..e88e5038d2 100644 --- a/test/e2e/cluster_upgrade.go +++ b/test/e2e/cluster_upgrade.go @@ -80,7 +80,7 @@ func masterUpgradeGKE(v string) error { return err } -var nodeUpgrade = func(f *framework.Framework, replicas int, v string) error { +var nodeUpgrade = func(f *framework.Framework, replicas int32, v string) error { // Perform the upgrade. var err error switch framework.TestContext.Provider { @@ -172,7 +172,7 @@ func nodeUpgradeGKE(v string) error { var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() { - svcName, replicas := "baz", 2 + svcName, replicas := "baz", int32(2) var rcName, ip, v string var ingress api.LoadBalancerIngress @@ -429,7 +429,7 @@ func runCmd(command string, args ...string) (string, string, error) { return stdout, stderr, nil } -func validate(f *framework.Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error { +func validate(f *framework.Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int32) error { framework.Logf("Beginning cluster validation") // Verify RC. rcs, err := f.Client.ReplicationControllers(f.Namespace.Name).List(api.ListOptions{}) diff --git a/test/e2e/container_probe.go b/test/e2e/container_probe.go index 718db5a70b..d51970913f 100644 --- a/test/e2e/container_probe.go +++ b/test/e2e/container_probe.go @@ -137,7 +137,7 @@ func getTransitionTimeForReadyCondition(p *api.Pod) (time.Time, error) { func getRestartCount(p *api.Pod) int { count := 0 for _, containerStatus := range p.Status.ContainerStatuses { - count += containerStatus.RestartCount + count += int(containerStatus.RestartCount) } return count } diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go index c2791de04a..e61e7faae1 100644 --- a/test/e2e/deployment.go +++ b/test/e2e/deployment.go @@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Deployment", func() { }) }) -func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet { +func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet { zero := int64(0) return &extensions.ReplicaSet{ ObjectMeta: api.ObjectMeta{ @@ -109,7 +109,7 @@ func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName } } -func newDeployment(deploymentName string, replicas int, podLabels map[string]string, imageName string, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int) *extensions.Deployment { +func newDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName string, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int32) *extensions.Deployment { zero := int64(0) return &extensions.Deployment{ ObjectMeta: api.ObjectMeta{ @@ -217,7 +217,7 @@ func testNewDeployment(f *framework.Framework) { deploymentName := "test-new-deployment" podLabels := map[string]string{"name": nginxImageName} - replicas := 1 + replicas := int32(1) framework.Logf("Creating simple deployment %s", deploymentName) d := newDeployment(deploymentName, 
replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} @@ -257,7 +257,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { } rsName := "test-rolling-update-controller" - replicas := 3 + replicas := int32(3) _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. @@ -306,7 +306,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) { "pod": nginxImageName, } rsName := "test-rolling-scale-controller" - replicas := 1 + replicas := int32(1) rsRevision := "3546343826724305832" annotations := make(map[string]string) @@ -369,7 +369,7 @@ func testRecreateDeployment(f *framework.Framework) { } rsName := "test-recreate-controller" - replicas := 3 + replicas := int32(3) _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. @@ -423,8 +423,8 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { "pod": nginxImageName, } rsName := "test-cleanup-controller" - replicas := 1 - revisionHistoryLimit := util.IntPtr(0) + replicas := int32(1) + revisionHistoryLimit := util.Int32Ptr(0) _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) @@ -478,7 +478,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) defer stopDeployment(c, f.Client, ns, deploymentName) - err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit) + err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit)) Expect(err).NotTo(HaveOccurred()) close(stopCh) } @@ -499,7 +499,7 @@ func testRolloverDeployment(f *framework.Framework) { } rsName := "test-rollover-controller" - rsReplicas := 4 + rsReplicas := int32(4) _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. @@ -509,13 +509,13 @@ func testRolloverDeployment(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) } // Wait for the required pods to be ready for at least minReadySeconds (be available) - deploymentMinReadySeconds := 5 - err = framework.WaitForPodsReady(c, ns, podName, deploymentMinReadySeconds) + deploymentMinReadySeconds := int32(5) + err = framework.WaitForPodsReady(c, ns, podName, int(deploymentMinReadySeconds)) Expect(err).NotTo(HaveOccurred()) // Create a deployment to delete nginx pods and instead bring up redis-slave pods. deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave" - deploymentReplicas := 4 + deploymentReplicas := int32(4) deploymentImage := "gcr.io/google_samples/gb-redisslave:v1" deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType framework.Logf("Creating deployment %s", deploymentName) @@ -653,7 +653,7 @@ func testRollbackDeployment(f *framework.Framework) { // 1. Create a deployment to create nginx pods. 
deploymentName, deploymentImageName := "test-rollback-deployment", nginxImageName - deploymentReplicas := 1 + deploymentReplicas := int32(1) deploymentImage := nginxImage deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType framework.Logf("Creating deployment %s", deploymentName) @@ -765,7 +765,7 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) { // Create an old RS without revision rsName := "test-rollback-no-revision-controller" - rsReplicas := 0 + rsReplicas := int32(0) rs := newRS(rsName, rsReplicas, rsPodLabels, nginxImageName, nginxImage) rs.Annotations = make(map[string]string) rs.Annotations["make"] = "difference" @@ -774,7 +774,7 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) { // 1. Create a deployment to create nginx pods, which have different template than the replica set created above. deploymentName, deploymentImageName := "test-rollback-no-revision-deployment", nginxImageName - deploymentReplicas := 1 + deploymentReplicas := int32(1) deploymentImage := nginxImage deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType framework.Logf("Creating deployment %s", deploymentName) @@ -898,7 +898,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) { podLabels := map[string]string{"name": podName} rsName := "test-adopted-controller" - replicas := 3 + replicas := int32(3) image := nginxImage _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, image)) Expect(err).NotTo(HaveOccurred()) @@ -944,5 +944,5 @@ func testDeploymentLabelAdopted(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) err = framework.CheckPodHashLabel(pods) Expect(err).NotTo(HaveOccurred()) - Expect(len(pods.Items)).Should(Equal(replicas)) + Expect(int32(len(pods.Items))).Should(Equal(replicas)) } diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 43341a7693..a5c27e3aa1 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -120,7 +120,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // cluster infrastructure pods that are being pulled or started can block // test pods from running, and tests that ensure all pods are running and // ready will fail). 
- if err := framework.WaitForPodsRunningReady(api.NamespaceSystem, framework.TestContext.MinStartupPods, podStartupTimeout); err != nil { + if err := framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout); err != nil { if c, errClient := framework.LoadClient(); errClient != nil { framework.Logf("Unable to dump cluster information because: %v", errClient) } else { diff --git a/test/e2e/empty_dir_wrapper.go b/test/e2e/empty_dir_wrapper.go index c96979f18a..ec5dad8c69 100644 --- a/test/e2e/empty_dir_wrapper.go +++ b/test/e2e/empty_dir_wrapper.go @@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() { Image: "gcr.io/google_containers/fakegitserver:0.1", ImagePullPolicy: "IfNotPresent", Ports: []api.ContainerPort{ - {ContainerPort: containerPort}, + {ContainerPort: int32(containerPort)}, }, }, }, @@ -92,7 +92,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() { Ports: []api.ServicePort{ { Name: "http-portal", - Port: httpPort, + Port: int32(httpPort), TargetPort: intstr.FromInt(containerPort), }, }, diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index ef38522dff..75acdb2a58 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -410,7 +410,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str } else { return []api.ServicePort{{ Protocol: "TCP", - Port: svcPort, + Port: int32(svcPort), TargetPort: intstr.FromInt(contPort), }} } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 5c47c92db6..911829ac13 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -421,7 +421,7 @@ func hasReplicationControllersForPod(rcs *api.ReplicationControllerList, pod api // that it requires the list of pods on every iteration. This is useful, for // example, in cluster startup, because the number of pods increases while // waiting. -func WaitForPodsRunningReady(ns string, minPods int, timeout time.Duration) error { +func WaitForPodsRunningReady(ns string, minPods int32, timeout time.Duration) error { c, err := LoadClient() if err != nil { return err @@ -438,7 +438,7 @@ func WaitForPodsRunningReady(ns string, minPods int, timeout time.Duration) erro Logf("Error getting replication controllers in namespace '%s': %v", ns, err) return false, nil } - replicas := 0 + replicas := int32(0) for _, rc := range rcList.Items { replicas += rc.Spec.Replicas } @@ -448,7 +448,7 @@ func WaitForPodsRunningReady(ns string, minPods int, timeout time.Duration) erro Logf("Error getting pods in namespace '%s': %v", ns, err) return false, nil } - nOk, replicaOk, badPods := 0, 0, []api.Pod{} + nOk, replicaOk, badPods := int32(0), int32(0), []api.Pod{} for _, pod := range podList.Items { if res, err := PodRunningReady(&pod); res && err == nil { nOk++ @@ -1231,7 +1231,7 @@ func PodsResponding(c *client.Client, ns, name string, wantName bool, pods *api. return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) } -func PodsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) { +func PodsCreated(c *client.Client, ns, name string, replicas int32) (*api.PodList, error) { timeout := 2 * time.Minute // List the pods, making sure we observe all the replicas. 
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) @@ -1251,7 +1251,7 @@ func PodsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, } Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas) - if len(created) == replicas { + if int32(len(created)) == replicas { pods.Items = created return pods, nil } @@ -1274,7 +1274,7 @@ func podsRunning(c *client.Client, pods *api.PodList) []error { return e } -func VerifyPods(c *client.Client, ns, name string, wantName bool, replicas int) error { +func VerifyPods(c *client.Client, ns, name string, wantName bool, replicas int32) error { pods, err := PodsCreated(c, ns, name, replicas) if err != nil { return err @@ -1747,7 +1747,7 @@ func (config *DeploymentConfig) create() error { Name: config.Name, }, Spec: extensions.DeploymentSpec{ - Replicas: config.Replicas, + Replicas: int32(config.Replicas), Selector: &unversioned.LabelSelector{ MatchLabels: map[string]string{ "name": config.Name, @@ -1800,7 +1800,7 @@ func (config *ReplicaSetConfig) create() error { Name: config.Name, }, Spec: extensions.ReplicaSetSpec{ - Replicas: config.Replicas, + Replicas: int32(config.Replicas), Selector: &unversioned.LabelSelector{ MatchLabels: map[string]string{ "name": config.Name, @@ -1853,7 +1853,7 @@ func (config *RCConfig) create() error { Name: config.Name, }, Spec: api.ReplicationControllerSpec{ - Replicas: config.Replicas, + Replicas: int32(config.Replicas), Selector: map[string]string{ "name": config.Name, }, @@ -1902,13 +1902,13 @@ func (config *RCConfig) applyTo(template *api.PodTemplateSpec) { if config.Ports != nil { for k, v := range config.Ports { c := &template.Spec.Containers[0] - c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: v}) + c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)}) } } if config.HostPorts != nil { for k, v := range config.HostPorts { c := &template.Spec.Containers[0] - c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: v, HostPort: v}) + c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)}) } } if config.CpuLimit > 0 || config.MemLimit > 0 { @@ -2461,7 +2461,7 @@ func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) erro // Waits for the deployment to reach desired state. // Returns an error if minAvailable or maxCreated is broken at any times. 
-func WaitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int) error { +func WaitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int32) error { var oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet var newRS *extensions.ReplicaSet var deployment *extensions.Deployment @@ -2528,7 +2528,7 @@ func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentNa if err != nil { return false, err } - if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= minUpdatedReplicas { + if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= int32(minUpdatedReplicas) { return true, nil } return false, nil @@ -2619,7 +2619,7 @@ func WaitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds i return false, nil } for _, pod := range pods.Items { - if !deploymentutil.IsPodAvailable(&pod, minReadySeconds) { + if !deploymentutil.IsPodAvailable(&pod, int32(minReadySeconds)) { return false, nil } } @@ -2654,7 +2654,7 @@ func WaitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute) } -func logPodsOfReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int) { +func logPodsOfReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int32) { allPods, err := deploymentutil.GetPodsForReplicaSets(c, rss) if err == nil { for _, pod := range allPods { @@ -2743,7 +2743,7 @@ func FailedContainers(pod *api.Pod) map[string]ContainerFailures { if state, ok = states[status.ContainerID]; !ok { state = ContainerFailures{} } - state.Restarts = status.RestartCount + state.Restarts = int(status.RestartCount) states[status.ContainerID] = state } } @@ -3352,9 +3352,9 @@ func getSvcNodePort(client *client.Client, ns, name string, svcPort int) (int, e return 0, err } for _, p := range svc.Spec.Ports { - if p.Port == svcPort { + if p.Port == int32(svcPort) { if p.NodePort != 0 { - return p.NodePort, nil + return int(p.NodePort), nil } } } @@ -3682,7 +3682,7 @@ func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) { Name: containerName, Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab", Env: []api.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}}, - Ports: []api.ContainerPort{{ContainerPort: port}}, + Ports: []api.ContainerPort{{ContainerPort: int32(port)}}, }, }, NodeName: nodeName, diff --git a/test/e2e/horizontal_pod_autoscaling.go b/test/e2e/horizontal_pod_autoscaling.go index a6be6185e2..6686c0adbf 100644 --- a/test/e2e/horizontal_pod_autoscaling.go +++ b/test/e2e/horizontal_pod_autoscaling.go @@ -105,16 +105,16 @@ var _ = framework.KubeDescribe("Horizontal pod autoscaling (scale resource: CPU) // HPAScaleTest struct is used by the scale(...) function. 
type HPAScaleTest struct { - initPods int - totalInitialCPUUsage int + initPods int32 + totalInitialCPUUsage int32 perPodCPURequest int64 - targetCPUUtilizationPercent int - minPods int - maxPods int - firstScale int + targetCPUUtilizationPercent int32 + minPods int32 + maxPods int32 + firstScale int32 firstScaleStasis time.Duration cpuBurst int - secondScale int + secondScale int32 secondScaleStasis time.Duration useV1 bool } @@ -125,16 +125,16 @@ type HPAScaleTest struct { // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to. // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes. func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *framework.Framework) { - rc = NewDynamicResourceConsumer(name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 100, f) + rc = NewDynamicResourceConsumer(name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 100, f) defer rc.CleanUp() createCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods, scaleTest.useV1) - rc.WaitForReplicas(scaleTest.firstScale) + rc.WaitForReplicas(int(scaleTest.firstScale)) if scaleTest.firstScaleStasis > 0 { - rc.EnsureDesiredReplicas(scaleTest.firstScale, scaleTest.firstScaleStasis) + rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis) } if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 { rc.ConsumeCPU(scaleTest.cpuBurst) - rc.WaitForReplicas(scaleTest.secondScale) + rc.WaitForReplicas(int(scaleTest.secondScale)) } } @@ -170,7 +170,7 @@ func scaleDown(name, kind string, rc *ResourceConsumer, f *framework.Framework) scaleTest.run(name, kind, rc, f) } -func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int, useV1 bool) { +func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32, useV1 bool) { hpa := &extensions.HorizontalPodAutoscaler{ ObjectMeta: api.ObjectMeta{ Name: rc.name, diff --git a/test/e2e/job.go b/test/e2e/job.go index 05e03b7280..59c9565354 100644 --- a/test/e2e/job.go +++ b/test/e2e/job.go @@ -42,9 +42,9 @@ const ( var _ = framework.KubeDescribe("Job", func() { f := framework.NewDefaultFramework("job") - parallelism := 2 - completions := 4 - lotsOfFailures := 5 // more than completions + parallelism := int32(2) + completions := int32(4) + lotsOfFailures := int32(5) // more than completions // Simplest case: all pods succeed promptly It("should run a job to completion when tasks succeed", func() { @@ -112,8 +112,8 @@ var _ = framework.KubeDescribe("Job", func() { }) It("should scale a job up", func() { - startParallelism := 1 - endParallelism := 2 + startParallelism := int32(1) + endParallelism := int32(2) By("Creating a job") job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) job, err := createJob(f.Client, f.Namespace.Name, job) @@ -137,8 +137,8 @@ var _ = framework.KubeDescribe("Job", func() { }) It("should scale a job down", func() { - startParallelism := 2 - endParallelism := 1 + startParallelism := int32(2) + endParallelism := int32(1) By("Creating a job") job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions) job, err := createJob(f.Client, f.Namespace.Name, job) @@ -199,7 +199,7 @@ var _ = framework.KubeDescribe("Job", func() { }) 
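The test updates above all follow the same pattern: counters that end up in API fields (replicas, parallelism, completions, HPA min/max pods) are declared as int32 to match the API types, and widened back with int() only at helpers that still take plain int. A minimal, self-contained sketch of that pattern follows; the jobSpec, newJobSpec, and waitForReplicas names are hypothetical and not part of this patch.

package main

import "fmt"

type jobSpec struct {
	Parallelism *int32 // mirrors batch.JobSpec.Parallelism
	Completions *int32 // mirrors batch.JobSpec.Completions
}

func int32Ptr(i int32) *int32 { return &i }

// newJobSpec keeps its parameters as int32 so they can feed the API fields directly.
func newJobSpec(parallelism, completions int32) jobSpec {
	return jobSpec{Parallelism: int32Ptr(parallelism), Completions: int32Ptr(completions)}
}

// waitForReplicas stands in for helpers that still accept a plain int.
func waitForReplicas(want int) { fmt.Println("waiting for", want, "replicas") }

func main() {
	spec := newJobSpec(2, 4)
	// Widen only at the int-typed boundary, as the tests do with int(scaleTest.firstScale).
	waitForReplicas(int(*spec.Parallelism))
	fmt.Println("completions:", *spec.Completions)
}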
// newTestJob returns a job which does one of several testing behaviors. -func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, completions int) *batch.Job { +func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) *batch.Job { job := &batch.Job{ ObjectMeta: api.ObjectMeta{ Name: name, @@ -270,7 +270,7 @@ func deleteJob(c *client.Client, ns, name string) error { } // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy. -func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int) error { +func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int32) error { label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName})) return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { options := api.ListOptions{LabelSelector: label} @@ -278,7 +278,7 @@ func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int if err != nil { return false, err } - count := 0 + count := int32(0) for _, p := range pods.Items { if p.Status.Phase == api.PodRunning { count++ @@ -289,7 +289,7 @@ func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int } // Wait for job to reach completions. -func waitForJobFinish(c *client.Client, ns, jobName string, completions int) error { +func waitForJobFinish(c *client.Client, ns, jobName string, completions int32) error { return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { curr, err := c.Extensions().Jobs(ns).Get(jobName) if err != nil { diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index 18f7ac1354..0940e2e1f0 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -770,7 +770,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { framework.Failf("1 port is expected") } port := service.Spec.Ports[0] - if port.Port != servicePort { + if port.Port != int32(servicePort) { framework.Failf("Wrong service port: %d", port.Port) } if port.TargetPort.IntValue() != redisPort { diff --git a/test/e2e/mesos.go b/test/e2e/mesos.go index 1862840a4c..242f893f76 100644 --- a/test/e2e/mesos.go +++ b/test/e2e/mesos.go @@ -68,7 +68,7 @@ var _ = framework.KubeDescribe("Mesos", func() { nodelist := framework.ListSchedulableNodesOrDie(f.Client) const ns = "static-pods" - numpods := len(nodelist.Items) + numpods := int32(len(nodelist.Items)) framework.ExpectNoError(framework.WaitForPodsRunningReady(ns, numpods, wait.ForeverTestTimeout), fmt.Sprintf("number of static pods in namespace %s is %d", ns, numpods)) }) diff --git a/test/e2e/pods.go b/test/e2e/pods.go index 66b67cf107..6ba649fcb9 100644 --- a/test/e2e/pods.go +++ b/test/e2e/pods.go @@ -80,7 +80,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe // Wait for the restart state to be as desired. 
deadline := time.Now().Add(timeout) lastRestartCount := initialRestartCount - observedRestarts := 0 + observedRestarts := int32(0) for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) { pod, err = c.Pods(ns).Get(podDescr.Name) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name)) @@ -94,7 +94,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe } } observedRestarts = restartCount - initialRestartCount - if expectNumRestarts > 0 && observedRestarts >= expectNumRestarts { + if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts { // Stop if we have observed more than expectNumRestarts restarts. break } @@ -104,7 +104,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe // If we expected 0 restarts, fail if observed any restart. // If we expected n restarts (n > 0), fail if we observed < n restarts. if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 && - observedRestarts < expectNumRestarts) { + int(observedRestarts) < expectNumRestarts) { framework.Failf("pod %s/%s - expected number of restarts: %t, found restarts: %t", ns, podDescr.Name, expectNumRestarts, observedRestarts) } diff --git a/test/e2e/rc.go b/test/e2e/rc.go index 9496276665..87efda2f14 100644 --- a/test/e2e/rc.go +++ b/test/e2e/rc.go @@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("ReplicationController", func() { // which is checked for each replica. func ServeImageOrFail(f *framework.Framework, test string, image string) { name := "my-hostname-" + test + "-" + string(util.NewUUID()) - replicas := 2 + replicas := int32(2) // Create a replication controller for a service // that serves its hostname. diff --git a/test/e2e/replica_set.go b/test/e2e/replica_set.go index e8acda5c45..9fae8d198c 100644 --- a/test/e2e/replica_set.go +++ b/test/e2e/replica_set.go @@ -51,7 +51,7 @@ var _ = framework.KubeDescribe("ReplicaSet", func() { // image serves its hostname which is checked for each replica. func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) { name := "my-hostname-" + test + "-" + string(util.NewUUID()) - replicas := 2 + replicas := int32(2) // Create a ReplicaSet for a service that serves its hostname. 
// The source for the Docker containter kubernetes/serve_hostname is diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go index 2ad692f1e4..0c1ed791d5 100644 --- a/test/e2e/resize_nodes.go +++ b/test/e2e/resize_nodes.go @@ -53,7 +53,7 @@ const ( testPort = 9376 ) -func resizeGroup(size int) error { +func resizeGroup(size int32) error { if framework.TestContext.ReportDir != "" { framework.CoreDump(framework.TestContext.ReportDir) defer framework.CoreDump(framework.TestContext.ReportDir) @@ -70,7 +70,7 @@ func resizeGroup(size int) error { return err } else if framework.TestContext.Provider == "aws" { client := autoscaling.New(session.New()) - return awscloud.ResizeInstanceGroup(client, framework.TestContext.CloudConfig.NodeInstanceGroup, size) + return awscloud.ResizeInstanceGroup(client, framework.TestContext.CloudConfig.NodeInstanceGroup, int(size)) } else { return fmt.Errorf("Provider does not support InstanceGroups") } @@ -103,7 +103,7 @@ func groupSize() (int, error) { } } -func waitForGroupSize(size int) error { +func waitForGroupSize(size int32) error { timeout := 10 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { currentSize, err := groupSize() @@ -111,7 +111,7 @@ func waitForGroupSize(size int) error { framework.Logf("Failed to get node instance group size: %v", err) continue } - if currentSize != size { + if currentSize != int(size) { framework.Logf("Waiting for node instance group size %d, current size %d", size, currentSize) continue } @@ -132,7 +132,7 @@ func svcByName(name string, port int) *api.Service { "name": name, }, Ports: []api.ServicePort{{ - Port: port, + Port: int32(port), TargetPort: intstr.FromInt(port), }}, }, @@ -176,22 +176,22 @@ func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error { return err } -func rcByName(name string, replicas int, image string, labels map[string]string) *api.ReplicationController { +func rcByName(name string, replicas int32, image string, labels map[string]string) *api.ReplicationController { return rcByNameContainer(name, replicas, image, labels, api.Container{ Name: name, Image: image, }) } -func rcByNamePort(name string, replicas int, image string, port int, protocol api.Protocol, labels map[string]string) *api.ReplicationController { +func rcByNamePort(name string, replicas int32, image string, port int, protocol api.Protocol, labels map[string]string) *api.ReplicationController { return rcByNameContainer(name, replicas, image, labels, api.Container{ Name: name, Image: image, - Ports: []api.ContainerPort{{ContainerPort: port, Protocol: protocol}}, + Ports: []api.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}}, }) } -func rcByNameContainer(name string, replicas int, image string, labels map[string]string, c api.Container) *api.ReplicationController { +func rcByNameContainer(name string, replicas int32, image string, labels map[string]string, c api.Container) *api.ReplicationController { // Add "name": name to the labels, overwriting if it exists. labels["name"] = name gracePeriod := int64(0) @@ -222,13 +222,13 @@ func rcByNameContainer(name string, replicas int, image string, labels map[strin } // newRCByName creates a replication controller with a selector by name of name. 
-func newRCByName(c *client.Client, ns, name string, replicas int) (*api.ReplicationController, error) { +func newRCByName(c *client.Client, ns, name string, replicas int32) (*api.ReplicationController, error) { By(fmt.Sprintf("creating replication controller %s", name)) return c.ReplicationControllers(ns).Create(rcByNamePort( name, replicas, serveHostnameImage, 9376, api.ProtocolTCP, map[string]string{})) } -func resizeRC(c *client.Client, ns, name string, replicas int) error { +func resizeRC(c *client.Client, ns, name string, replicas int32) error { rc, err := c.ReplicationControllers(ns).Get(name) if err != nil { return err @@ -285,7 +285,7 @@ func getNodeExternalIP(node *api.Node) string { // At the end (even in case of errors), the network traffic is brought back to normal. // This function executes commands on a node so it will work only for some // environments. -func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replicas int, podNameToDisappear string, node *api.Node) { +func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replicas int32, podNameToDisappear string, node *api.Node) { host := getNodeExternalIP(node) master := getMaster(c) By(fmt.Sprintf("block network traffic from node %s to the master", node.Name)) @@ -343,7 +343,7 @@ func expectNodeReadiness(isReady bool, newNode chan *api.Node) { var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { f := framework.NewDefaultFramework("resize-nodes") - var systemPodsNo int + var systemPodsNo int32 var c *client.Client var ns string BeforeEach(func() { @@ -351,7 +351,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { ns = f.Namespace.Name systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) - systemPodsNo = len(systemPods.Items) + systemPodsNo = int32(len(systemPods.Items)) }) // Slow issue #13323 (8 min) @@ -371,7 +371,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { } By("restoring the original node instance group size") - if err := resizeGroup(framework.TestContext.CloudConfig.NumNodes); err != nil { + if err := resizeGroup(int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { framework.Failf("Couldn't restore the original node instance group size: %v", err) } // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a @@ -386,7 +386,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { By("waiting 5 minutes for all dead tunnels to be dropped") time.Sleep(5 * time.Minute) } - if err := waitForGroupSize(framework.TestContext.CloudConfig.NumNodes); err != nil { + if err := waitForGroupSize(int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { framework.Failf("Couldn't restore the original node instance group size: %v", err) } if err := framework.WaitForClusterSize(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil { @@ -404,7 +404,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { // Create a replication controller for a service that serves its hostname. 
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-delete-node" - replicas := framework.TestContext.CloudConfig.NumNodes + replicas := int32(framework.TestContext.CloudConfig.NumNodes) newRCByName(c, ns, name, replicas) err := framework.VerifyPods(c, ns, name, true, replicas) Expect(err).NotTo(HaveOccurred()) @@ -414,7 +414,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { Expect(err).NotTo(HaveOccurred()) err = waitForGroupSize(replicas - 1) Expect(err).NotTo(HaveOccurred()) - err = framework.WaitForClusterSize(c, replicas-1, 10*time.Minute) + err = framework.WaitForClusterSize(c, int(replicas-1), 10*time.Minute) Expect(err).NotTo(HaveOccurred()) By("verifying whether the pods from the removed node are recreated") @@ -428,7 +428,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-add-node" newSVCByName(c, ns, name) - replicas := framework.TestContext.CloudConfig.NumNodes + replicas := int32(framework.TestContext.CloudConfig.NumNodes) newRCByName(c, ns, name, replicas) err := framework.VerifyPods(c, ns, name, true, replicas) Expect(err).NotTo(HaveOccurred()) @@ -438,7 +438,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { Expect(err).NotTo(HaveOccurred()) err = waitForGroupSize(replicas + 1) Expect(err).NotTo(HaveOccurred()) - err = framework.WaitForClusterSize(c, replicas+1, 10*time.Minute) + err = framework.WaitForClusterSize(c, int(replicas+1), 10*time.Minute) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1)) @@ -469,7 +469,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-net" newSVCByName(c, ns, name) - replicas := framework.TestContext.CloudConfig.NumNodes + replicas := int32(framework.TestContext.CloudConfig.NumNodes) newRCByName(c, ns, name, replicas) err := framework.VerifyPods(c, ns, name, true, replicas) Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding") diff --git a/test/e2e/resource_quota.go b/test/e2e/resource_quota.go index 3ae417ffa7..fdb55139d7 100644 --- a/test/e2e/resource_quota.go +++ b/test/e2e/resource_quota.go @@ -739,7 +739,7 @@ func newTestPersistentVolumeClaimForQuota(name string) *api.PersistentVolumeClai } // newTestReplicationControllerForQuota returns a simple replication controller -func newTestReplicationControllerForQuota(name, image string, replicas int) *api.ReplicationController { +func newTestReplicationControllerForQuota(name, image string, replicas int32) *api.ReplicationController { return &api.ReplicationController{ ObjectMeta: api.ObjectMeta{ Name: name, diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go index 6a094de484..95c2d21ccc 100644 --- a/test/e2e/scheduler_predicates.go +++ b/test/e2e/scheduler_predicates.go @@ -189,7 +189,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { } } - err = framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout) + err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout) Expect(err).NotTo(HaveOccurred()) for _, node := range nodeList.Items { 
diff --git a/test/e2e/service.go b/test/e2e/service.go
index 08dd2547df..b7f9ac2c8b 100644
--- a/test/e2e/service.go
+++ b/test/e2e/service.go
@@ -194,13 +194,13 @@ var _ = framework.KubeDescribe("Services", func() {
 		containerPorts1 := []api.ContainerPort{
 			{
 				Name:          svc1port,
-				ContainerPort: port1,
+				ContainerPort: int32(port1),
 			},
 		}
 		containerPorts2 := []api.ContainerPort{
 			{
 				Name:          svc2port,
-				ContainerPort: port2,
+				ContainerPort: int32(port2),
 			},
 		}
 
@@ -385,7 +385,7 @@ var _ = framework.KubeDescribe("Services", func() {
 			svc.Spec.Type = api.ServiceTypeNodePort
 		})
 		jig.SanityCheckService(service, api.ServiceTypeNodePort)
-		nodePort := service.Spec.Ports[0].NodePort
+		nodePort := int(service.Spec.Ports[0].NodePort)
 
 		By("creating pod to be part of service " + serviceName)
 		jig.RunOrFail(ns, nil)
@@ -446,7 +446,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
 			framework.Failf("expected to use the same port for TCP and UDP")
 		}
-		svcPort := tcpService.Spec.Ports[0].Port
+		svcPort := int(tcpService.Spec.Ports[0].Port)
 		framework.Logf("service port (TCP and UDP): %d", svcPort)
 
 		By("creating a pod to be part of the TCP service " + serviceName)
@@ -462,7 +462,7 @@ var _ = framework.KubeDescribe("Services", func() {
 			s.Spec.Type = api.ServiceTypeNodePort
 		})
 		jig.SanityCheckService(tcpService, api.ServiceTypeNodePort)
-		tcpNodePort := tcpService.Spec.Ports[0].NodePort
+		tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
 		framework.Logf("TCP node port: %d", tcpNodePort)
 
 		By("changing the UDP service to type=NodePort")
@@ -470,7 +470,7 @@ var _ = framework.KubeDescribe("Services", func() {
 			s.Spec.Type = api.ServiceTypeNodePort
 		})
 		jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
-		udpNodePort := udpService.Spec.Ports[0].NodePort
+		udpNodePort := int(udpService.Spec.Ports[0].NodePort)
 		framework.Logf("UDP node port: %d", udpNodePort)
 
 		By("hitting the TCP service's NodePort")
@@ -516,7 +516,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		// Wait for the load balancer to be created asynchronously
 		tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name)
 		jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
-		if tcpService.Spec.Ports[0].NodePort != tcpNodePort {
+		if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
 			framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
 		}
 		if requestedIP != "" && getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
@@ -547,7 +547,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		// 2nd one should be faster since they ran in parallel.
 		udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name)
 		jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
-		if udpService.Spec.Ports[0].NodePort != udpNodePort {
+		if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
 			framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
 		}
 		udpIngressIP = getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
@@ -579,7 +579,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		tcpService = jig.ChangeServiceNodePortOrFail(ns1, tcpService.Name, tcpNodePort)
 		jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
 		tcpNodePortOld := tcpNodePort
-		tcpNodePort = tcpService.Spec.Ports[0].NodePort
+		tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
 		if tcpNodePort == tcpNodePortOld {
 			framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
 		}
@@ -596,7 +596,7 @@ var _ = framework.KubeDescribe("Services", func() {
 			jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
 		}
 		udpNodePortOld := udpNodePort
-		udpNodePort = udpService.Spec.Ports[0].NodePort
+		udpNodePort = int(udpService.Spec.Ports[0].NodePort)
 		if udpNodePort == udpNodePortOld {
 			framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
 		}
@@ -633,11 +633,11 @@ var _ = framework.KubeDescribe("Services", func() {
 		})
 		jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
 		svcPortOld := svcPort
-		svcPort = tcpService.Spec.Ports[0].Port
+		svcPort = int(tcpService.Spec.Ports[0].Port)
 		if svcPort == svcPortOld {
 			framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
 		}
-		if tcpService.Spec.Ports[0].NodePort != tcpNodePort {
+		if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
 			framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
 		}
 		if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
@@ -653,10 +653,10 @@ var _ = framework.KubeDescribe("Services", func() {
 		} else {
 			jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
 		}
-		if udpService.Spec.Ports[0].Port != svcPort {
+		if int(udpService.Spec.Ports[0].Port) != svcPort {
 			framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
 		}
-		if udpService.Spec.Ports[0].NodePort != udpNodePort {
+		if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
 			framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
 		}
 		if loadBalancerSupportsUDP && getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
@@ -801,7 +801,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		if port.NodePort == 0 {
 			framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
 		}
-		if !ServiceNodePortRange.Contains(port.NodePort) {
+		if !ServiceNodePortRange.Contains(int(port.NodePort)) {
 			framework.Failf("got unexpected (out-of-range) port for new service: %v", service)
 		}
 
@@ -815,7 +815,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		}
 		By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort))
 		result, err := updateService(c, ns, serviceName, func(s *api.Service) {
-			s.Spec.Ports[0].NodePort = outOfRangeNodePort
+			s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
 		})
 		if err == nil {
 			framework.Failf("failed to prevent update of service with out-of-range NodePort: %v", result)
@@ -830,7 +830,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort))
 		service = t.BuildServiceSpec()
 		service.Spec.Type = api.ServiceTypeNodePort
-		service.Spec.Ports[0].NodePort = outOfRangeNodePort
+		service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
 		service, err = t.CreateService(service)
 		if err == nil {
 			framework.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service)
@@ -869,7 +869,7 @@ var _ = framework.KubeDescribe("Services", func() {
 		if port.NodePort == 0 {
 			framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
 		}
-		if !ServiceNodePortRange.Contains(port.NodePort) {
+		if !ServiceNodePortRange.Contains(int(port.NodePort)) {
 			framework.Failf("got unexpected (out-of-range) port for new service: %v", service)
 		}
 		nodePort := port.NodePort
@@ -938,11 +938,11 @@ func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
 				key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort)
 				mesosContainerPortString := endpoints.Annotations[key]
 				if mesosContainerPortString != "" {
-					var err error
-					containerPort, err = strconv.Atoi(mesosContainerPortString)
+					mesosContainerPort, err := strconv.Atoi(mesosContainerPortString)
 					if err != nil {
 						continue
 					}
+					containerPort = int32(mesosContainerPort)
 					framework.Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
 				}
 
@@ -950,7 +950,7 @@ func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
 				if _, ok := m[addr.TargetRef.UID]; !ok {
 					m[addr.TargetRef.UID] = make([]int, 0)
 				}
-				m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], containerPort)
+				m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], int(containerPort))
 			}
 		}
 	}
@@ -1282,7 +1282,7 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas
 		},
 		Spec: api.ServiceSpec{
 			Ports: []api.ServicePort{{
-				Port:       port,
+				Port:       int32(port),
 				TargetPort: intstr.FromInt(9376),
 				Protocol:   "TCP",
 			}},
@@ -1543,7 +1543,7 @@ func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.Servic
 			framework.Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort)
 		}
 		if hasNodePort {
-			if !ServiceNodePortRange.Contains(port.NodePort) {
+			if !ServiceNodePortRange.Contains(int(port.NodePort)) {
 				framework.Failf("out-of-range nodePort (%d) for service", port.NodePort)
 			}
 		}
@@ -1606,7 +1606,7 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini
 		offs2 := (offs1 + i) % ServiceNodePortRange.Size
 		newPort := ServiceNodePortRange.Base + offs2
 		service, err = j.UpdateService(namespace, name, func(s *api.Service) {
-			s.Spec.Ports[0].NodePort = newPort
+			s.Spec.Ports[0].NodePort = int32(newPort)
 		})
 		if err != nil && strings.Contains(err.Error(), "provided port is already allocated") {
 			framework.Logf("tried nodePort %d, but it is in use, will try another", newPort)
@@ -1754,7 +1754,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *api.Replicat
 	if err != nil {
 		framework.Failf("Failed to created RC %q: %v", rc.Name, err)
 	}
-	pods, err := j.waitForPodsCreated(namespace, rc.Spec.Replicas)
+	pods, err := j.waitForPodsCreated(namespace, int(rc.Spec.Replicas))
 	if err != nil {
 		framework.Failf("Failed to create pods: %v", err)
 	}
@@ -1854,7 +1854,7 @@ func (t *ServiceTestFixture) BuildServiceSpec() *api.Service {
 
 // CreateWebserverRC creates rc-backed pods with the well-known webserver
 // configuration and records it for cleanup.
-func (t *ServiceTestFixture) CreateWebserverRC(replicas int) *api.ReplicationController {
+func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *api.ReplicationController {
 	rcSpec := rcByNamePort(t.name, replicas, t.image, 80, api.ProtocolTCP, t.Labels)
 	rcAct, err := t.createRC(rcSpec)
 	if err != nil {
diff --git a/test/e2e/ubernetes_lite.go b/test/e2e/ubernetes_lite.go
index 16a8adde26..29dfae604d 100644
--- a/test/e2e/ubernetes_lite.go
+++ b/test/e2e/ubernetes_lite.go
@@ -52,7 +52,7 @@ var _ = framework.KubeDescribe("Ubernetes Lite", func() {
 	})
 
 	It("should spread the pods of a replication controller across zones", func() {
-		SpreadRCOrFail(f, (2*zoneCount)+1, image)
+		SpreadRCOrFail(f, int32((2*zoneCount)+1), image)
 	})
 })
 
@@ -181,7 +181,7 @@ func checkZoneSpreading(c *client.Client, pods *api.PodList, zoneNames []string)
 }
 
 // Check that the pods comprising a replication controller get spread evenly across available zones
-func SpreadRCOrFail(f *framework.Framework, replicaCount int, image string) {
+func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 	name := "ubelite-spread-rc-" + string(util.NewUUID())
 	By(fmt.Sprintf("Creating replication controller %s", name))
 	controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
diff --git a/test/e2e/volumes.go b/test/e2e/volumes.go
index 00574e6d9b..a6d41f75e9 100644
--- a/test/e2e/volumes.go
+++ b/test/e2e/volumes.go
@@ -87,7 +87,7 @@ func startVolumeServer(client *client.Client, config VolumeTestConfig) *api.Pod
 
 		serverPodPorts[i] = api.ContainerPort{
 			Name:          portName,
-			ContainerPort: config.serverPorts[i],
+			ContainerPort: int32(config.serverPorts[i]),
 			Protocol:      api.ProtocolTCP,
 		}
 	}
diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go
index d66acc5686..73115d8fcb 100644
--- a/test/integration/framework/master_utils.go
+++ b/test/integration/framework/master_utils.go
@@ -241,7 +241,7 @@ func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
 }
 
 // ScaleRC scales the given rc to the given replicas.
-func ScaleRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
+func ScaleRC(name, ns string, replicas int32, restClient *client.Client) (*api.ReplicationController, error) {
 	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), restClient)
 	if err != nil {
 		return nil, err
@@ -291,7 +291,7 @@ func StartPods(numPods int, host string, restClient *client.Client) error {
 	controller := RCFromManifest(TestRCManifest)
 
 	// Make the rc unique to the given host.
-	controller.Spec.Replicas = numPods
+	controller.Spec.Replicas = int32(numPods)
 	controller.Spec.Template.Spec.NodeName = host
 	controller.Name = controller.Name + host
 	controller.Spec.Selector["host"] = host
diff --git a/test/integration/quota_test.go b/test/integration/quota_test.go
index 96d288ac70..cfddcbcf60 100644
--- a/test/integration/quota_test.go
+++ b/test/integration/quota_test.go
@@ -156,7 +156,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
 			Namespace: namespace,
 		},
 		Spec: api.ReplicationControllerSpec{
-			Replicas: target,
+			Replicas: int32(target),
 			Selector: map[string]string{"foo": "bar"},
 			Template: &api.PodTemplateSpec{
 				ObjectMeta: api.ObjectMeta{
@@ -194,7 +194,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
 
 		switch cast := event.Object.(type) {
 		case *api.ReplicationController:
-			if cast.Status.Replicas == target {
+			if int(cast.Status.Replicas) == target {
 				return true, nil
 			}
 		}
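Note on the pattern (illustrative sketch, not part of the patch): every hunk above follows the same rule. Port and replica fields on the API structs are now int32, so call sites widen with int(...) when passing values to helpers that still take int (strconv.Itoa, range checks, comparisons against int locals) and narrow with int32(...) when assigning literals or small counters back into the structs. The standalone Go sketch below uses a hypothetical servicePort struct, not the real Kubernetes API types.

// illustrative_sketch.go - hypothetical stand-in types, not the real k8s API.
package main

import (
	"fmt"
	"strconv"
)

// servicePort mimics an API object whose numeric fields became int32.
type servicePort struct {
	Port     int32
	NodePort int32
}

func main() {
	p := servicePort{Port: 80, NodePort: 30080}

	// Widening int32 -> int is always safe; it is needed for helpers that
	// take int, such as strconv.Itoa or a Contains(int) range check.
	hostPort := "127.0.0.1:" + strconv.Itoa(int(p.NodePort))
	fmt.Println(hostPort) // 127.0.0.1:30080

	// Narrowing int -> int32 is safe for valid port numbers (0-65535),
	// which is why the tests can cast literals and small counters back.
	requested := 31000
	p.NodePort = int32(requested)
	fmt.Println(p.NodePort) // 31000
}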