mirror of https://github.com/k3s-io/k3s
Fix the rest of the code
parent 8d0187add2
commit fdb110c859
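This commit is part of the Kubernetes int → int32 migration: port numbers, sync-concurrency counts, burst limits, and similar fields in the componentconfig and API types are narrowed from int to int32, and every call site that still needs a plain int (strconv.Itoa, rate limiters, controller Run methods) gains an explicit conversion, because Go never converts between integer types implicitly. A minimal sketch of the recurring pattern (Config is a hypothetical stand-in; strconv and net are the real APIs):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// Config stands in for the narrowed componentconfig structs: port- and
// count-like fields are int32 after this commit.
type Config struct {
	Address string
	Port    int32
}

func main() {
	c := Config{Address: "127.0.0.1", Port: 10252}
	// strconv.Itoa still takes an int, so the int32 field is widened
	// explicitly at the call site — the change repeated throughout this diff.
	addr := net.JoinHostPort(c.Address, strconv.Itoa(int(c.Port)))
	fmt.Println(addr) // 127.0.0.1:10252
}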
@@ -192,7 +192,7 @@ func (ks *kube2sky) generateRecordsForHeadlessService(subdomain string, e *kapi.
 			endpointPort := &e.Subsets[idx].Ports[portIdx]
 			portSegment := buildPortSegmentString(endpointPort.Name, endpointPort.Protocol)
 			if portSegment != "" {
-				err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, endpointPort.Port)
+				err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, int(endpointPort.Port))
 				if err != nil {
 					return err
 				}
@@ -343,7 +343,7 @@ func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *k
 		port := &service.Spec.Ports[i]
 		portSegment := buildPortSegmentString(port.Name, port.Protocol)
 		if portSegment != "" {
-			err = ks.generateSRVRecord(subdomain, portSegment, recordLabel, subdomain, port.Port)
+			err = ks.generateSRVRecord(subdomain, portSegment, recordLabel, subdomain, int(port.Port))
 			if err != nil {
 				return err
 			}

@@ -111,7 +111,7 @@ type hostPort struct {
 func getHostPort(service *kapi.Service) *hostPort {
 	return &hostPort{
 		Host: service.Spec.ClusterIP,
-		Port: service.Spec.Ports[0].Port,
+		Port: int(service.Spec.Ports[0].Port),
 	}
 }
 
@@ -181,7 +181,7 @@ func newService(namespace, serviceName, clusterIP, portName string, portNumber i
 		Spec: kapi.ServiceSpec{
 			ClusterIP: clusterIP,
 			Ports: []kapi.ServicePort{
-				{Port: portNumber, Name: portName, Protocol: "TCP"},
+				{Port: int32(portNumber), Name: portName, Protocol: "TCP"},
 			},
 		},
 	}
@@ -212,7 +212,7 @@ func newSubset() kapi.EndpointSubset {
 
 func newSubsetWithOnePort(portName string, port int, ips ...string) kapi.EndpointSubset {
 	subset := newSubset()
-	subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: port, Name: portName, Protocol: "TCP"})
+	subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: int32(port), Name: portName, Protocol: "TCP"})
 	for _, ip := range ips {
 		subset.Addresses = append(subset.Addresses, kapi.EndpointAddress{IP: ip})
 	}
@@ -221,7 +221,7 @@ func newSubsetWithOnePort(portName string, port int, ips ...string) kapi.Endpoin
 
 func newSubsetWithTwoPorts(portName1 string, portNumber1 int, portName2 string, portNumber2 int, ips ...string) kapi.EndpointSubset {
 	subset := newSubsetWithOnePort(portName1, portNumber1, ips...)
-	subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: portNumber2, Name: portName2, Protocol: "TCP"})
+	subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: int32(portNumber2), Name: portName2, Protocol: "TCP"})
 	return subset
 }
 

@@ -126,7 +126,7 @@ func Run(s *options.CMServer) error {
 	kubeconfig.ContentConfig.ContentType = s.ContentType
 	// Override kubeconfig qps/burst settings from flags
 	kubeconfig.QPS = s.KubeAPIQPS
-	kubeconfig.Burst = s.KubeAPIBurst
+	kubeconfig.Burst = int(s.KubeAPIBurst)
 
 	kubeClient, err := client.New(kubeconfig)
 	if err != nil {
@@ -144,7 +144,7 @@ func Run(s *options.CMServer) error {
 		mux.Handle("/metrics", prometheus.Handler())
 
 		server := &http.Server{
-			Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
+			Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
 			Handler: mux,
 		}
 		glog.Fatal(server.ListenAndServe())
@@ -198,7 +198,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 	informers[reflect.TypeOf(&api.Pod{})] = podInformer
 
 	go endpointcontroller.NewEndpointController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller"))).
-		Run(s.ConcurrentEndpointSyncs, wait.NeverStop)
+		Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 
 	go replicationcontroller.NewReplicationManager(
@@ -206,12 +206,12 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")),
 		ResyncPeriod(s),
 		replicationcontroller.BurstReplicas,
-		s.LookupCacheSizeForRC,
-	).Run(s.ConcurrentRCSyncs, wait.NeverStop)
+		int(s.LookupCacheSizeForRC),
+	).Run(int(s.ConcurrentRCSyncs), wait.NeverStop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 
 	if s.TerminatedPodGCThreshold > 0 {
-		go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.TerminatedPodGCThreshold).
+		go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), int(s.TerminatedPodGCThreshold)).
 			Run(wait.NeverStop)
 		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 	}
@@ -224,8 +224,8 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		// this cidr has been validated already
 		_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
 		nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
-			s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
-			flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
+			s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
+			flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
 			s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
 		nodeController.Run(s.NodeSyncPeriod.Duration)
 		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
@@ -268,7 +268,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		ReplenishmentResyncPeriod: ResyncPeriod(s),
 		GroupKindsToReplenish: groupKindsToReplenish,
 	}
-	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
+	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 
 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
@@ -299,7 +299,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		glog.Fatalf("Failed to get supported resources from server: %v", err)
 	}
 	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
-	go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop)
+	go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 
 	groupVersion := "extensions/v1beta1"
@@ -324,29 +324,29 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 
 		if containsResource(resources, "daemonsets") {
 			glog.Infof("Starting daemon set controller")
-			go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), s.LookupCacheSizeForDaemonSet).
-				Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
+			go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), int(s.LookupCacheSizeForDaemonSet)).
+				Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
 			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 		}
 
 		if containsResource(resources, "jobs") {
 			glog.Infof("Starting job controller")
 			go job.NewJobController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller"))).
-				Run(s.ConcurrentJobSyncs, wait.NeverStop)
+				Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
 			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 		}
 
 		if containsResource(resources, "deployments") {
 			glog.Infof("Starting deployment controller")
 			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
-				Run(s.ConcurrentDeploymentSyncs, wait.NeverStop)
+				Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
 			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 		}
 
 		if containsResource(resources, "replicasets") {
 			glog.Infof("Starting ReplicaSet controller")
-			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, s.LookupCacheSizeForRS).
-				Run(s.ConcurrentRSSyncs, wait.NeverStop)
+			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
+				Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
 			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 		}
 	}
@@ -364,7 +364,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
 		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
 		s.PVClaimBinderSyncPeriod.Duration,
-		s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
+		int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry),
 		ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
 		cloud,
 	)

@@ -92,19 +92,19 @@ func NewCMServer() *CMServer {
 
 // AddFlags adds flags for a specific CMServer to the specified FlagSet
 func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
-	fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
+	fs.Int32Var(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
 	fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
 	fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
 	fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
-	fs.IntVar(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
-	fs.IntVar(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
-	fs.IntVar(&s.ConcurrentRSSyncs, "concurrent-replicaset-syncs", s.ConcurrentRSSyncs, "The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
-	fs.IntVar(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
-	fs.IntVar(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
-	fs.IntVar(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load")
-	fs.IntVar(&s.LookupCacheSizeForRC, "replication-controller-lookup-cache-size", s.LookupCacheSizeForRC, "The the size of lookup cache for replication controllers. Larger number = more responsive replica management, but more MEM load.")
-	fs.IntVar(&s.LookupCacheSizeForRS, "replicaset-lookup-cache-size", s.LookupCacheSizeForRS, "The the size of lookup cache for replicatsets. Larger number = more responsive replica management, but more MEM load.")
-	fs.IntVar(&s.LookupCacheSizeForDaemonSet, "daemonset-lookup-cache-size", s.LookupCacheSizeForDaemonSet, "The the size of lookup cache for daemonsets. Larger number = more responsive daemonsets, but more MEM load.")
+	fs.Int32Var(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
+	fs.Int32Var(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
+	fs.Int32Var(&s.ConcurrentRSSyncs, "concurrent-replicaset-syncs", s.ConcurrentRSSyncs, "The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
+	fs.Int32Var(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
+	fs.Int32Var(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
+	fs.Int32Var(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load")
+	fs.Int32Var(&s.LookupCacheSizeForRC, "replication-controller-lookup-cache-size", s.LookupCacheSizeForRC, "The the size of lookup cache for replication controllers. Larger number = more responsive replica management, but more MEM load.")
+	fs.Int32Var(&s.LookupCacheSizeForRS, "replicaset-lookup-cache-size", s.LookupCacheSizeForRS, "The the size of lookup cache for replicatsets. Larger number = more responsive replica management, but more MEM load.")
+	fs.Int32Var(&s.LookupCacheSizeForDaemonSet, "daemonset-lookup-cache-size", s.LookupCacheSizeForDaemonSet, "The the size of lookup cache for daemonsets. Larger number = more responsive daemonsets, but more MEM load.")
 	fs.DurationVar(&s.ServiceSyncPeriod.Duration, "service-sync-period", s.ServiceSyncPeriod.Duration, "The period for syncing services with their external load balancers")
 	fs.DurationVar(&s.NodeSyncPeriod.Duration, "node-sync-period", s.NodeSyncPeriod.Duration, ""+
 		"The period for syncing nodes from cloudprovider. Longer periods will result in "+
@@ -114,19 +114,19 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.DurationVar(&s.PVClaimBinderSyncPeriod.Duration, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod.Duration, "The period for syncing persistent volumes and persistent volume claims")
 	fs.DurationVar(&s.MinResyncPeriod.Duration, "min-resync-period", s.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod")
 	fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
-	fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
-	fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
+	fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
+	fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
 	fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
-	fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
-	fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
+	fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
+	fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
 	fs.BoolVar(&s.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
-	fs.IntVar(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
+	fs.Int32Var(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
 	fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
 	fs.DurationVar(&s.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.")
 	fs.DurationVar(&s.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.")
 	fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
-	fs.IntVar(&s.DeletingPodsBurst, "deleting-pods-burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
-	fs.IntVar(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
+	fs.Int32Var(&s.DeletingPodsBurst, "deleting-pods-burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
+	fs.Int32Var(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
 		"The number of retries for initial node registration. Retry interval equals node-sync-period.")
 	fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.")
 	fs.DurationVar(&s.NodeMonitorGracePeriod.Duration, "node-monitor-grace-period", s.NodeMonitorGracePeriod.Duration,
@@ -147,7 +147,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.RootCAFile, "root-ca-file", s.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.")
 	fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.")
 	fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
-	fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
+	fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
 	fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.")
 	leaderelection.BindFlags(&s.LeaderElection, fs)
 }

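Once a struct field becomes int32, the pflag binding must change in lockstep: spf13/pflag has one binder per integer width, and fs.IntVar(&s.Port, ...) no longer compiles against an int32 field. A standalone sketch of the new bindings (cmServer is an illustrative type; the field and flag names are borrowed from CMServer above):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// cmServer mirrors the narrowed CMServer fields for illustration only.
type cmServer struct {
	Port                    int32
	ConcurrentEndpointSyncs int32
}

func main() {
	s := &cmServer{Port: 10252, ConcurrentEndpointSyncs: 5}
	fs := pflag.NewFlagSet("controller-manager", pflag.ExitOnError)
	// Int32Var takes a *int32 and an int32 default, matching the field type.
	fs.Int32Var(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
	fs.Int32Var(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently")
	fs.Parse([]string{"--port=10000"})
	fmt.Println(s.Port, s.ConcurrentEndpointSyncs) // 10000 5
}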
@@ -57,8 +57,8 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []
 
 	// HostPath recycling is for testing and development purposes only!
 	hostPathConfig := volume.VolumeConfig{
-		RecyclerMinimumTimeout: config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath,
-		RecyclerTimeoutIncrement: config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath,
+		RecyclerMinimumTimeout: int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath),
+		RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath),
 		RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(),
 	}
 	if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil {
@@ -67,8 +67,8 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []
 	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)
 
 	nfsConfig := volume.VolumeConfig{
-		RecyclerMinimumTimeout: config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS,
-		RecyclerTimeoutIncrement: config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS,
+		RecyclerMinimumTimeout: int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS),
+		RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS),
 		RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(),
 	}
 	if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {

@@ -40,7 +40,7 @@ type ProxyServerConfig struct {
 	ResourceContainer string
 	ContentType string
 	KubeAPIQPS float32
-	KubeAPIBurst int
+	KubeAPIBurst int32
 	ConfigSyncPeriod time.Duration
 	CleanupAndExit bool
 	NodeRef *api.ObjectReference
@@ -63,16 +63,16 @@ func NewProxyConfig() *ProxyServerConfig {
 func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
 	fs.Var(componentconfig.IPVar{Val: &s.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)")
 	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
-	fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.")
+	fs.Int32Var(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.")
 	fs.Var(componentconfig.IPVar{Val: &s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
-	fs.IntVar(s.OOMScoreAdj, "oom-score-adj", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]")
+	fs.Int32Var(s.OOMScoreAdj, "oom-score-adj", util.Int32PtrDerefOr(s.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]")
 	fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).")
 	fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.")
 	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
 	fs.Var(componentconfig.PortRangeVar{Val: &s.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.")
 	fs.StringVar(&s.HostnameOverride, "hostname-override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
 	fs.Var(&s.Mode, "proxy-mode", "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '"+ExperimentalProxyModeAnnotation+"' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.")
-	fs.IntVar(s.IPTablesMasqueradeBit, "iptables-masquerade-bit", util.IntPtrDerefOr(s.IPTablesMasqueradeBit, 14), "If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].")
+	fs.Int32Var(s.IPTablesMasqueradeBit, "iptables-masquerade-bit", util.Int32PtrDerefOr(s.IPTablesMasqueradeBit, 14), "If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].")
 	fs.DurationVar(&s.IPTablesSyncPeriod.Duration, "iptables-sync-period", s.IPTablesSyncPeriod.Duration, "How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
 	fs.DurationVar(&s.ConfigSyncPeriod, "config-sync-period", s.ConfigSyncPeriod, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
 	fs.BoolVar(&s.MasqueradeAll, "masquerade-all", s.MasqueradeAll, "If using the pure iptables proxy, SNAT everything")
@@ -80,8 +80,8 @@ func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
 	fs.BoolVar(&s.CleanupAndExit, "cleanup-iptables", s.CleanupAndExit, "If true cleanup iptables rules and exit.")
 	fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.")
 	fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
-	fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
+	fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
 	fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
-	fs.IntVar(&s.ConntrackMax, "conntrack-max", s.ConntrackMax, "Maximum number of NAT connections to track (0 to leave as-is)")
+	fs.Int32Var(&s.ConntrackMax, "conntrack-max", s.ConntrackMax, "Maximum number of NAT connections to track (0 to leave as-is)")
 	fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
 }

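Two of the kube-proxy flags (oom-score-adj, iptables-masquerade-bit) bind through a *int32 rather than an int32 so that "flag not set" stays distinguishable from an explicit zero; the util.Int32PtrDerefOr calls above supply the default when the pointer is nil. The diff does not show that helper's body, but its call sites imply a shape like the following (assumed, not copied from the util package):

package main

import "fmt"

// Int32PtrDerefOr returns *ptr when ptr is non-nil, otherwise def.
func Int32PtrDerefOr(ptr *int32, def int32) int32 {
	if ptr != nil {
		return *ptr
	}
	return def
}

func main() {
	var unset *int32
	set := int32(4)
	fmt.Println(Int32PtrDerefOr(unset, 14)) // 14 — default wins
	fmt.Println(Int32PtrDerefOr(&set, 14))  // 4 — explicit value wins
}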
@@ -150,7 +150,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err
 	var oomAdjuster *oom.OOMAdjuster
 	if config.OOMScoreAdj != nil {
 		oomAdjuster = oom.NewOOMAdjuster()
-		if err := oomAdjuster.ApplyOOMScoreAdj(0, *config.OOMScoreAdj); err != nil {
+		if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*config.OOMScoreAdj)); err != nil {
 			glog.V(2).Info(err)
 		}
 	}
@@ -181,7 +181,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err
 	kubeconfig.ContentType = config.ContentType
 	// Override kubeconfig qps/burst settings from flags
 	kubeconfig.QPS = config.KubeAPIQPS
-	kubeconfig.Burst = config.KubeAPIBurst
+	kubeconfig.Burst = int(config.KubeAPIBurst)
 
 	client, err := kubeclient.New(kubeconfig)
 	if err != nil {
@@ -204,7 +204,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err
 			return nil, fmt.Errorf("Unable to read IPTablesMasqueradeBit from config")
 		}
 
-		proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll, *config.IPTablesMasqueradeBit, config.ClusterCIDR)
+		proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll, int(*config.IPTablesMasqueradeBit), config.ClusterCIDR)
 		if err != nil {
 			glog.Fatalf("Unable to create proxier: %v", err)
 		}
@@ -289,7 +289,7 @@ func (s *ProxyServer) Run() error {
 		})
 		configz.InstallHandler(http.DefaultServeMux)
 		go wait.Until(func() {
-			err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(s.Config.HealthzPort), nil)
+			err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(int(s.Config.HealthzPort)), nil)
 			if err != nil {
 				glog.Errorf("Starting health server failed: %v", err)
 			}
@@ -299,7 +299,7 @@ func (s *ProxyServer) Run() error {
 	// Tune conntrack, if requested
 	if s.Conntracker != nil {
 		if s.Config.ConntrackMax > 0 {
-			if err := s.Conntracker.SetMax(s.Config.ConntrackMax); err != nil {
+			if err := s.Conntracker.SetMax(int(s.Config.ConntrackMax)); err != nil {
 				return err
 			}
 		}

@@ -117,7 +117,7 @@ func NewKubeletServer() *KubeletServer {
 		VolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
 		NodeStatusUpdateFrequency: unversioned.Duration{Duration: 10 * time.Second},
 		NodeLabels: make(map[string]string),
-		OOMScoreAdj: qos.KubeletOOMScoreAdj,
+		OOMScoreAdj: int32(qos.KubeletOOMScoreAdj),
 		LockFilePath: "",
 		PodInfraContainerImage: GetDefaultPodInfraContainerImage(),
 		Port: ports.KubeletPort,
@@ -176,21 +176,21 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.HostPIDSources, "host-pid-sources", s.HostPIDSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host pid namespace. [default=\"*\"]")
 	fs.StringVar(&s.HostIPCSources, "host-ipc-sources", s.HostIPCSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host ipc namespace. [default=\"*\"]")
 	fs.Float64Var(&s.RegistryPullQPS, "registry-qps", s.RegistryPullQPS, "If > 0, limit registry pull QPS to this value. If 0, unlimited. [default=5.0]")
-	fs.IntVar(&s.RegistryBurst, "registry-burst", s.RegistryBurst, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0")
+	fs.Int32Var(&s.RegistryBurst, "registry-burst", s.RegistryBurst, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0")
 	fs.Float32Var(&s.EventRecordQPS, "event-qps", s.EventRecordQPS, "If > 0, limit event creations per second to this value. If 0, unlimited.")
-	fs.IntVar(&s.EventBurst, "event-burst", s.EventBurst, "Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0")
+	fs.Int32Var(&s.EventBurst, "event-burst", s.EventBurst, "Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0")
 	fs.BoolVar(&s.RunOnce, "runonce", s.RunOnce, "If true, exit after spawning pods from local manifests or remote urls. Exclusive with --api-servers, and --enable-server")
 	fs.BoolVar(&s.EnableDebuggingHandlers, "enable-debugging-handlers", s.EnableDebuggingHandlers, "Enables server endpoints for log collection and local running of containers and commands")
 	fs.DurationVar(&s.MinimumGCAge.Duration, "minimum-container-ttl-duration", s.MinimumGCAge.Duration, "Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'")
-	fs.IntVar(&s.MaxPerPodContainerCount, "maximum-dead-containers-per-container", s.MaxPerPodContainerCount, "Maximum number of old instances to retain per container. Each container takes up some disk space. Default: 2.")
-	fs.IntVar(&s.MaxContainerCount, "maximum-dead-containers", s.MaxContainerCount, "Maximum number of old instances of containers to retain globally. Each container takes up some disk space. Default: 100.")
+	fs.Int32Var(&s.MaxPerPodContainerCount, "maximum-dead-containers-per-container", s.MaxPerPodContainerCount, "Maximum number of old instances to retain per container. Each container takes up some disk space. Default: 2.")
+	fs.Int32Var(&s.MaxContainerCount, "maximum-dead-containers", s.MaxContainerCount, "Maximum number of old instances of containers to retain globally. Each container takes up some disk space. Default: 100.")
 	fs.Var(&s.AuthPath, "auth-path", "Path to .kubernetes_auth file, specifying how to authenticate to API server.")
 	fs.MarkDeprecated("auth-path", "will be removed in a future version")
 	fs.Var(&s.KubeConfig, "kubeconfig", "Path to a kubeconfig file, specifying how to authenticate to API server (the master location is set by the api-servers flag).")
 	fs.UintVar(&s.CAdvisorPort, "cadvisor-port", s.CAdvisorPort, "The port of the localhost cAdvisor endpoint")
-	fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port of the localhost healthz endpoint")
+	fs.Int32Var(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port of the localhost healthz endpoint")
 	fs.Var(componentconfig.IPVar{Val: &s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
-	fs.IntVar(&s.OOMScoreAdj, "oom-score-adj", s.OOMScoreAdj, "The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]")
+	fs.Int32Var(&s.OOMScoreAdj, "oom-score-adj", s.OOMScoreAdj, "The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]")
 	fs.StringSliceVar(&s.APIServerList, "api-servers", []string{}, "List of Kubernetes API servers for publishing events, and reading pods and services. (ip:port), comma separated.")
 	fs.BoolVar(&s.RegisterNode, "register-node", s.RegisterNode, "Register the node with the apiserver (defaults to true if --api-servers is set)")
 	fs.StringVar(&s.ClusterDomain, "cluster-domain", s.ClusterDomain, "Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains")
@@ -201,9 +201,9 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	bindableNodeLabels := config.ConfigurationMap(s.NodeLabels)
 	fs.Var(&bindableNodeLabels, "node-labels", "<Warning: Alpha feature> Labels to add when registering the node in the cluster. Labels must be key=value pairs separated by ','.")
 	fs.DurationVar(&s.ImageMinimumGCAge.Duration, "minimum-image-ttl-duration", s.ImageMinimumGCAge.Duration, "Minimum age for a unused image before it is garbage collected. Examples: '300ms', '10s' or '2h45m'. Default: '2m'")
-	fs.IntVar(&s.ImageGCHighThresholdPercent, "image-gc-high-threshold", s.ImageGCHighThresholdPercent, "The percent of disk usage after which image garbage collection is always run. Default: 90%")
-	fs.IntVar(&s.ImageGCLowThresholdPercent, "image-gc-low-threshold", s.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Default: 80%")
-	fs.IntVar(&s.LowDiskSpaceThresholdMB, "low-diskspace-threshold-mb", s.LowDiskSpaceThresholdMB, "The absolute free disk space, in MB, to maintain. When disk space falls below this threshold, new pods would be rejected. Default: 256")
+	fs.Int32Var(&s.ImageGCHighThresholdPercent, "image-gc-high-threshold", s.ImageGCHighThresholdPercent, "The percent of disk usage after which image garbage collection is always run. Default: 90%")
+	fs.Int32Var(&s.ImageGCLowThresholdPercent, "image-gc-low-threshold", s.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Default: 80%")
+	fs.Int32Var(&s.LowDiskSpaceThresholdMB, "low-diskspace-threshold-mb", s.LowDiskSpaceThresholdMB, "The absolute free disk space, in MB, to maintain. When disk space falls below this threshold, new pods would be rejected. Default: 256")
 	fs.DurationVar(&s.VolumeStatsAggPeriod.Duration, "volume-stats-agg-period", s.VolumeStatsAggPeriod.Duration, "Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to 0. Default: '1m'")
 	fs.StringVar(&s.NetworkPluginName, "network-plugin", s.NetworkPluginName, "<Warning: Alpha feature> The name of the network plugin to be invoked for various events in kubelet/pod lifecycle")
 	fs.StringVar(&s.NetworkPluginDir, "network-plugin-dir", s.NetworkPluginDir, "<Warning: Alpha feature> The full path of the directory in which to search for network plugins")
@@ -230,7 +230,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.HairpinMode, "hairpin-mode", s.HairpinMode, "How should the kubelet setup hairpin NAT. This allows endpoints of a Service to loadbalance back to themselves if they should try to access their own Service. Valid values are \"promiscuous-bridge\", \"hairpin-veth\" and \"none\".")
 	fs.BoolVar(&s.BabysitDaemons, "babysit-daemons", s.BabysitDaemons, "If true, the node has babysitter process monitoring docker and kubelet.")
 	fs.MarkDeprecated("babysit-daemons", "Will be removed in a future version.")
-	fs.IntVar(&s.MaxPods, "max-pods", s.MaxPods, "Number of Pods that can run on this Kubelet.")
+	fs.Int32Var(&s.MaxPods, "max-pods", s.MaxPods, "Number of Pods that can run on this Kubelet.")
 	fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.")
 	fs.StringVar(&s.NonMasqueradeCIDR, "non-masquerade-cidr", s.NonMasqueradeCIDR, "Traffic to IPs outside this range will use IP masquerade.")
 	fs.StringVar(&s.PodCIDR, "pod-cidr", "", "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.")
@@ -247,7 +247,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.BoolVar(&s.RegisterSchedulable, "register-schedulable", s.RegisterSchedulable, "Register the node as schedulable. No-op if register-node is false. [default=true]")
 	fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.")
 	fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
-	fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
+	fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
 	fs.BoolVar(&s.SerializeImagePulls, "serialize-image-pulls", s.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]")
 	fs.BoolVar(&s.ExperimentalFlannelOverlay, "experimental-flannel-overlay", s.ExperimentalFlannelOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]")
 	fs.DurationVar(&s.OutOfDiskTransitionFrequency.Duration, "outofdisk-transition-frequency", s.OutOfDiskTransitionFrequency.Duration, "Duration for which the kubelet has to wait before transitioning out of out-of-disk node condition status. Default: 5m0s")

@@ -160,13 +160,13 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
 
 	imageGCPolicy := kubelet.ImageGCPolicy{
 		MinAge: s.ImageMinimumGCAge.Duration,
-		HighThresholdPercent: s.ImageGCHighThresholdPercent,
-		LowThresholdPercent: s.ImageGCLowThresholdPercent,
+		HighThresholdPercent: int(s.ImageGCHighThresholdPercent),
+		LowThresholdPercent: int(s.ImageGCLowThresholdPercent),
 	}
 
 	diskSpacePolicy := kubelet.DiskSpacePolicy{
-		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
-		RootFreeDiskMB: s.LowDiskSpaceThresholdMB,
+		DockerFreeDiskMB: int(s.LowDiskSpaceThresholdMB),
+		RootFreeDiskMB: int(s.LowDiskSpaceThresholdMB),
 	}
 
 	manifestURLHeader := make(http.Header)
@@ -205,7 +205,7 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
 		EnableCustomMetrics: s.EnableCustomMetrics,
 		EnableDebuggingHandlers: s.EnableDebuggingHandlers,
 		EnableServer: s.EnableServer,
-		EventBurst: s.EventBurst,
+		EventBurst: int(s.EventBurst),
 		EventRecordQPS: s.EventRecordQPS,
 		FileCheckFrequency: s.FileCheckFrequency.Duration,
 		HostnameOverride: s.HostnameOverride,
@@ -218,10 +218,10 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
 		ManifestURL: s.ManifestURL,
 		ManifestURLHeader: manifestURLHeader,
 		MasterServiceNamespace: s.MasterServiceNamespace,
-		MaxContainerCount: s.MaxContainerCount,
+		MaxContainerCount: int(s.MaxContainerCount),
 		MaxOpenFiles: s.MaxOpenFiles,
-		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
-		MaxPods: s.MaxPods,
+		MaxPerPodContainerCount: int(s.MaxPerPodContainerCount),
+		MaxPods: int(s.MaxPods),
 		MinimumGCAge: s.MinimumGCAge.Duration,
 		Mounter: mounter,
 		NetworkPluginName: s.NetworkPluginName,
@@ -238,7 +238,7 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
 		ReadOnlyPort: s.ReadOnlyPort,
 		RegisterNode: s.RegisterNode,
 		RegisterSchedulable: s.RegisterSchedulable,
-		RegistryBurst: s.RegistryBurst,
+		RegistryBurst: int(s.RegistryBurst),
 		RegistryPullQPS: s.RegistryPullQPS,
 		ResolverConfig: s.ResolverConfig,
 		Reservation: *reservation,
@@ -302,7 +302,7 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
 		// make a separate client for events
 		eventClientConfig := *clientConfig
 		eventClientConfig.QPS = s.EventRecordQPS
-		eventClientConfig.Burst = s.EventBurst
+		eventClientConfig.Burst = int(s.EventBurst)
 		kcfg.EventClient, err = clientset.NewForConfig(&eventClientConfig)
 	}
 	if err != nil && len(s.APIServerList) > 0 {
@@ -349,7 +349,7 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
 
 	// TODO(vmarmol): Do this through container config.
 	oomAdjuster := kcfg.OOMAdjuster
-	if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
+	if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil {
 		glog.Warning(err)
 	}
 
@@ -360,7 +360,7 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
 	if s.HealthzPort > 0 {
 		healthz.DefaultHealthz()
 		go wait.Until(func() {
-			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(s.HealthzPort)), nil)
+			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), nil)
 			if err != nil {
 				glog.Errorf("Starting health server failed: %v", err)
 			}
@@ -473,7 +473,7 @@ func CreateAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config,
 	clientConfig.ContentType = s.ContentType
 	// Override kubeconfig qps/burst settings from flags
 	clientConfig.QPS = s.KubeAPIQPS
-	clientConfig.Burst = s.KubeAPIBurst
+	clientConfig.Burst = int(s.KubeAPIBurst)
 
 	addChaosToClientConfig(s, clientConfig)
 	return clientConfig, nil
@@ -801,7 +801,7 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
 	}
 
 	daemonEndpoints := &api.NodeDaemonEndpoints{
-		KubeletEndpoint: api.DaemonEndpoint{Port: int(kc.Port)},
+		KubeletEndpoint: api.DaemonEndpoint{Port: int32(kc.Port)},
 	}
 
 	pc = kc.PodConfig

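The kubelet changes also run in the widening direction: where a plain int is at hand (kc.Port here, a test loop index in the fixtures below) and the target API field is now int32, the code wraps it in int32(...). Note that an untyped constant such as 8000 would adapt to an int32 field on its own; it is a mixed expression like 8000 + i, whose type is int, that forces the spelled-out conversion. A small sketch of the fixture pattern (daemonEndpoint is a hypothetical stand-in for api.DaemonEndpoint):

package main

import "fmt"

// daemonEndpoint mirrors api.DaemonEndpoint after this commit: Port is int32.
type daemonEndpoint struct {
	Port int32
}

func newTestEndpoint(i int) daemonEndpoint {
	// 8000 alone is an untyped constant, but 8000 + i is an int expression,
	// so assigning it to the int32 field needs an explicit conversion.
	return daemonEndpoint{Port: int32(8000 + i)}
}

func main() {
	fmt.Println(newTestEndpoint(42).Port) // 8042
}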
@@ -127,20 +127,20 @@ func (s *CMServer) Run(_ []string) error {
 		}
 		mux.Handle("/metrics", prometheus.Handler())
 		server := &http.Server{
-			Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
+			Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
 			Handler: mux,
 		}
 		glog.Fatal(server.ListenAndServe())
 	}()
 
 	endpoints := s.createEndpointController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller")))
-	go endpoints.Run(s.ConcurrentEndpointSyncs, wait.NeverStop)
+	go endpoints.Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
 
-	go replicationcontroller.NewReplicationManagerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas, s.LookupCacheSizeForRC).
-		Run(s.ConcurrentRCSyncs, wait.NeverStop)
+	go replicationcontroller.NewReplicationManagerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas, int(s.LookupCacheSizeForRC)).
+		Run(int(s.ConcurrentRCSyncs), wait.NeverStop)
 
 	if s.TerminatedPodGCThreshold > 0 {
-		go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), s.resyncPeriod, s.TerminatedPodGCThreshold).
+		go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), s.resyncPeriod, int(s.TerminatedPodGCThreshold)).
 			Run(wait.NeverStop)
 	}
 
@@ -154,8 +154,8 @@ func (s *CMServer) Run(_ []string) error {
 	}
 	_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
 	nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
-		s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
-		flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
+		s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
+		flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
 		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
 	nodeController.Run(s.NodeSyncPeriod.Duration)
 
@@ -195,7 +195,7 @@ func (s *CMServer) Run(_ []string) error {
 		ReplenishmentResyncPeriod: s.resyncPeriod,
 		ControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactoryFromClient(resourceQuotaControllerClient),
 	}
-	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
+	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)
 
 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
 	// important when we start apiserver and controller manager at the same time.
@@ -225,7 +225,7 @@ func (s *CMServer) Run(_ []string) error {
 		glog.Fatalf("Failed to get supported resources from server: %v", err)
 	}
 	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
-	go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop)
+	go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
 
 	groupVersion := "extensions/v1beta1"
 	resources, found := resourceMap[groupVersion]
@@ -248,26 +248,26 @@ func (s *CMServer) Run(_ []string) error {
 
 		if containsResource(resources, "daemonsets") {
 			glog.Infof("Starting daemon set controller")
-			go daemon.NewDaemonSetsControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod, s.LookupCacheSizeForDaemonSet).
-				Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
+			go daemon.NewDaemonSetsControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod, int(s.LookupCacheSizeForDaemonSet)).
+				Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
 		}
 
 		if containsResource(resources, "jobs") {
 			glog.Infof("Starting job controller")
 			go job.NewJobControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller")), s.resyncPeriod).
-				Run(s.ConcurrentJobSyncs, wait.NeverStop)
+				Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
 		}
 
 		if containsResource(resources, "deployments") {
 			glog.Infof("Starting deployment controller")
 			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), s.resyncPeriod).
-				Run(s.ConcurrentDeploymentSyncs, wait.NeverStop)
+				Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
 		}
 
 		if containsResource(resources, "replicasets") {
 			glog.Infof("Starting ReplicaSet controller")
-			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas, s.LookupCacheSizeForRS).
-				Run(s.ConcurrentRSSyncs, wait.NeverStop)
+			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
+				Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
 		}
 	}
 
@@ -286,7 +286,7 @@ func (s *CMServer) Run(_ []string) error {
 	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
 		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
 		s.PVClaimBinderSyncPeriod.Duration,
-		s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
+		int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry),
 		kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
 		cloud,
 	)

@@ -514,7 +514,7 @@ func NewTestPod(i int) *api.Pod {
 					Name: "foo",
 					Ports: []api.ContainerPort{
 						{
-							ContainerPort: 8000 + i,
+							ContainerPort: int32(8000 + i),
 							Protocol: api.ProtocolTCP,
 						},
 					},

@@ -191,7 +191,7 @@ func (s *KubeletExecutorServer) runKubelet(
 
 	// make a separate client for events
 	eventClientConfig.QPS = s.EventRecordQPS
-	eventClientConfig.Burst = s.EventBurst
+	eventClientConfig.Burst = int(s.EventBurst)
 	kcfg.EventClient, err = clientset.NewForConfig(eventClientConfig)
 	if err != nil {
 		return err

@@ -140,7 +140,7 @@ func (b *binder) prepareTaskForLaunch(ctx api.Context, machine string, task *pod
 		oemPorts := pod.Spec.Containers[entry.ContainerIdx].Ports
 		ports := append([]api.ContainerPort{}, oemPorts...)
 		p := &ports[entry.PortIdx]
-		p.HostPort = int(entry.OfferPort)
+		p.HostPort = int32(entry.OfferPort)
 		op := strconv.FormatUint(entry.OfferPort, 10)
 		pod.Annotations[fmt.Sprintf(annotation.PortMappingKeyFormat, p.Protocol, p.ContainerPort)] = op
 		if p.Name != "" {

@@ -293,7 +293,7 @@ func NewTestPod() (*api.Pod, int) {
			{
				Ports: []api.ContainerPort{
					{
						ContainerPort: 8000 + currentPodNum,
						ContainerPort: int32(8000 + currentPodNum),
						Protocol:      api.ProtocolTCP,
					},
				},

@@ -73,7 +73,7 @@ func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, ser
			Labels: map[string]string{"provider": "k8sm", "component": "scheduler"},
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}},
			Ports: []api.ServicePort{{Port: int32(servicePort), Protocol: api.ProtocolTCP}},
			// maintained by this code, not by the pod selector
			Selector:        nil,
			SessionAffinity: api.ServiceAffinityNone,

@@ -96,7 +96,7 @@ func (m *SchedulerServer) setEndpoints(serviceName string, ip net.IP, port int)
	// The setting we want to find.
	want := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{{IP: ip.String()}},
		Ports:     []api.EndpointPort{{Port: port, Protocol: api.ProtocolTCP}},
		Ports:     []api.EndpointPort{{Port: int32(port), Protocol: api.ProtocolTCP}},
	}}

	ctx := api.NewDefaultContext()

@@ -320,7 +320,7 @@ func (e *endpointController) syncService(key string) {
		}

		// HACK(jdef): use HostIP instead of pod.CurrentState.PodIP for generic mesos compat
		epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto}
		epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
		epa := api.EndpointAddress{IP: pod.Status.HostIP, TargetRef: &api.ObjectReference{
			Kind:      "Pod",
			Namespace: pod.ObjectMeta.Namespace,

@@ -416,7 +416,7 @@ func findPort(pod *api.Pod, svcPort *api.ServicePort) (int, int, error) {
			for _, port := range container.Ports {
				if port.Name == name && port.Protocol == svcPort.Protocol {
					hostPort, err := findMappedPortName(pod, port.Protocol, name)
					return hostPort, port.ContainerPort, err
					return hostPort, int(port.ContainerPort), err
				}
			}
		}

@@ -429,9 +429,9 @@ func findPort(pod *api.Pod, svcPort *api.ServicePort) (int, int, error) {
		p := portName.IntValue()
		for _, container := range pod.Spec.Containers {
			for _, port := range container.Ports {
				if port.ContainerPort == p && port.Protocol == svcPort.Protocol {
				if int(port.ContainerPort) == p && port.Protocol == svcPort.Protocol {
					hostPort, err := findMappedPort(pod, port.Protocol, p)
					return hostPort, port.ContainerPort, err
					return hostPort, int(port.ContainerPort), err
				}
			}
		}

@@ -49,7 +49,7 @@ func FindPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) {
		for _, container := range pod.Spec.Containers {
			for _, port := range container.Ports {
				if port.Name == name && port.Protocol == svcPort.Protocol {
					return port.ContainerPort, nil
					return int(port.ContainerPort), nil
				}
			}
		}

@@ -159,8 +159,8 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source)
		},
		func(j *batch.JobSpec, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			completions := int(c.Rand.Int31())
			parallelism := int(c.Rand.Int31())
			completions := int32(c.Rand.Int31())
			parallelism := int32(c.Rand.Int31())
			j.Completions = &completions
			j.Parallelism = &parallelism
			if c.Rand.Int31()%2 == 0 {

@@ -395,9 +395,9 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source)
		},
		func(s *extensions.HorizontalPodAutoscalerSpec, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			minReplicas := int(c.Rand.Int31())
			minReplicas := int32(c.Rand.Int31())
			s.MinReplicas = &minReplicas
			s.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int(int32(c.RandUint64()))}
			s.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int32(c.RandUint64())}
		},
		func(s *extensions.SubresourceReference, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again

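A small sketch (assumption: plain math/rand rather than the gofuzz Continue wrapper used above) of why the int(...) round trips could be dropped here: Rand.Int31 already returns an int32, so the fuzzed *int32 fields can take its result directly.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	r := rand.New(rand.NewSource(1))
	completions := r.Int31() // already int32; no int(...) round trip needed
	parallelism := r.Int31()
	fmt.Printf("%T %d, %T %d\n", completions, completions, parallelism, parallelism)
}
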
@@ -236,7 +236,7 @@ func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *R
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*ReplicationControllerSpec))(in)
	}
	out.Replicas = int(*in.Replicas)
	out.Replicas = *in.Replicas
	if in.Selector != nil {
		out.Selector = make(map[string]string)
		for key, val := range in.Selector {

@@ -994,10 +994,10 @@ func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) fiel
		}
		if port.ContainerPort == 0 {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, PortRangeErrorMsg))
		} else if !validation.IsValidPortNum(port.ContainerPort) {
		} else if !validation.IsValidPortNum(int(port.ContainerPort)) {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, PortRangeErrorMsg))
		}
		if port.HostPort != 0 && !validation.IsValidPortNum(port.HostPort) {
		if port.HostPort != 0 && !validation.IsValidPortNum(int(port.HostPort)) {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, PortRangeErrorMsg))
		}
		if len(port.Protocol) == 0 {

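Sketch of the validation pattern above (isValidPortNum is a simplified stand-in for validation.IsValidPortNum, which still takes an int): the int32 API fields are widened at each call site rather than changing the helper's signature.

package main

import "fmt"

// simplified stand-in for validation.IsValidPortNum(int) bool
func isValidPortNum(port int) bool { return 0 < port && port < 65536 }

func main() {
	var containerPort int32 = 8080 // ContainerPort fields are now int32
	fmt.Println(isValidPortNum(int(containerPort))) // true
}
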
@@ -1808,7 +1808,7 @@ func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService boo
		}
	}

	if !validation.IsValidPortNum(sp.Port) {
	if !validation.IsValidPortNum(int(sp.Port)) {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, PortRangeErrorMsg))
	}

@@ -1891,7 +1891,7 @@ func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path
}

// Validates the given template and ensures that it is in accordance with the desired selector and replicas.
func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int, fldPath *field.Path) field.ErrorList {
func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if template == nil {
		allErrs = append(allErrs, field.Required(fldPath, ""))

@@ -2656,7 +2656,7 @@ func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *fie
			allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), port.Name, DNS1123LabelErrorMsg))
		}
	}
	if !validation.IsValidPortNum(port.Port) {
	if !validation.IsValidPortNum(int(port.Port)) {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, PortRangeErrorMsg))
	}
	if len(port.Protocol) == 0 {

@@ -631,7 +631,7 @@ func TestValidatePersistentVolumeClaimUpdate(t *testing.T) {
}

func TestValidateVolumes(t *testing.T) {
	lun := 1
	lun := int32(1)
	successCase := []api.Volume{
		{Name: "abc", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path1"}}},
		{Name: "123", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path2"}}},

@@ -88,14 +88,14 @@ func Convert_v1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscale
		return err
	}
	if in.MinReplicas != nil {
		out.MinReplicas = new(int)
		*out.MinReplicas = int(*in.MinReplicas)
		out.MinReplicas = new(int32)
		*out.MinReplicas = *in.MinReplicas
	} else {
		out.MinReplicas = nil
	}
	out.MaxReplicas = int(in.MaxReplicas)
	out.MaxReplicas = in.MaxReplicas
	if in.TargetCPUUtilizationPercentage != nil {
		out.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int(*in.TargetCPUUtilizationPercentage)}
		out.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: *in.TargetCPUUtilizationPercentage}
	}
	return nil
}

@@ -58,24 +58,9 @@ func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conv
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*batch.JobSpec))(in)
	}
	if in.Parallelism != nil {
		out.Parallelism = new(int32)
		*out.Parallelism = int32(*in.Parallelism)
	} else {
		out.Parallelism = nil
	}
	if in.Completions != nil {
		out.Completions = new(int32)
		*out.Completions = int32(*in.Completions)
	} else {
		out.Completions = nil
	}
	if in.ActiveDeadlineSeconds != nil {
		out.ActiveDeadlineSeconds = new(int64)
		*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
	} else {
		out.ActiveDeadlineSeconds = nil
	}
	out.Parallelism = in.Parallelism
	out.Completions = in.Completions
	out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
	// unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector
	if in.Selector != nil {
		out.Selector = new(LabelSelector)

@@ -102,24 +87,9 @@ func Convert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conv
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*JobSpec))(in)
	}
	if in.Parallelism != nil {
		out.Parallelism = new(int)
		*out.Parallelism = int(*in.Parallelism)
	} else {
		out.Parallelism = nil
	}
	if in.Completions != nil {
		out.Completions = new(int)
		*out.Completions = int(*in.Completions)
	} else {
		out.Completions = nil
	}
	if in.ActiveDeadlineSeconds != nil {
		out.ActiveDeadlineSeconds = new(int64)
		*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
	} else {
		out.ActiveDeadlineSeconds = nil
	}
	out.Parallelism = in.Parallelism
	out.Completions = in.Completions
	out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
	// unable to generate simple pointer conversion for v1.LabelSelector -> unversioned.LabelSelector
	if in.Selector != nil {
		out.Selector = new(unversioned.LabelSelector)

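A sketch (with stand-in types) of why the long allocate-and-convert blocks above collapse to plain assignments: once both sides of the conversion declare the field as *int32, copying the pointer is enough. Note that the two specs then share the same underlying value.

package main

import "fmt"

type batchJobSpec struct{ Parallelism *int32 } // stand-in internal type
type v1JobSpec struct{ Parallelism *int32 }    // stand-in versioned type

func convert(in *batchJobSpec, out *v1JobSpec) {
	// replaces: out.Parallelism = new(int32); *out.Parallelism = int32(*in.Parallelism)
	out.Parallelism = in.Parallelism
}

func main() {
	p := int32(3)
	in := batchJobSpec{Parallelism: &p}
	var out v1JobSpec
	convert(&in, &out)
	fmt.Println(*out.Parallelism) // 3, aliasing in.Parallelism
}
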
@@ -84,7 +84,7 @@ func TestValidateJob(t *testing.T) {
			t.Errorf("expected success for %s: %v", k, errs)
		}
	}
	negative := -1
	negative := int32(-1)
	negative64 := int64(-1)
	errorCases := map[string]batch.Job{
		"spec.parallelism:must be greater than or equal to 0": {

@@ -110,7 +110,7 @@ func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*ScaleStatus))(in)
	}
	out.Replicas = int(in.Replicas)
	out.Replicas = in.Replicas

	// Normally when 2 fields map to the same internal value we favor the old field, since
	// old clients can't be expected to know about new fields but clients that know about the

@@ -140,8 +140,7 @@ func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*extensions.DeploymentSpec))(in)
	}
	out.Replicas = new(int32)
	*out.Replicas = int32(in.Replicas)
	out.Replicas = &in.Replicas
	if in.Selector != nil {
		out.Selector = new(LabelSelector)
		if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil {

@@ -176,7 +175,7 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentS
		defaulting.(func(*DeploymentSpec))(in)
	}
	if in.Replicas != nil {
		out.Replicas = int(*in.Replicas)
		out.Replicas = *in.Replicas
	}

	if in.Selector != nil {

@@ -193,11 +192,8 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentS
	if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
		return err
	}
	if in.RevisionHistoryLimit != nil {
		out.RevisionHistoryLimit = new(int)
		*out.RevisionHistoryLimit = int(*in.RevisionHistoryLimit)
	}
	out.MinReadySeconds = int(in.MinReadySeconds)
	out.RevisionHistoryLimit = in.RevisionHistoryLimit
	out.MinReadySeconds = in.MinReadySeconds
	out.Paused = in.Paused
	if in.RollbackTo != nil {
		out.RollbackTo = new(extensions.RollbackConfig)

@@ -298,7 +294,7 @@ func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetS
		defaulting.(func(*ReplicaSetSpec))(in)
	}
	if in.Replicas != nil {
		out.Replicas = int(*in.Replicas)
		out.Replicas = *in.Replicas
	}
	if in.Selector != nil {
		out.Selector = new(unversioned.LabelSelector)

@@ -318,24 +314,9 @@ func Convert_batch_JobSpec_To_v1beta1_JobSpec(in *batch.JobSpec, out *JobSpec, s
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*batch.JobSpec))(in)
	}
	if in.Parallelism != nil {
		out.Parallelism = new(int32)
		*out.Parallelism = int32(*in.Parallelism)
	} else {
		out.Parallelism = nil
	}
	if in.Completions != nil {
		out.Completions = new(int32)
		*out.Completions = int32(*in.Completions)
	} else {
		out.Completions = nil
	}
	if in.ActiveDeadlineSeconds != nil {
		out.ActiveDeadlineSeconds = new(int64)
		*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
	} else {
		out.ActiveDeadlineSeconds = nil
	}
	out.Parallelism = in.Parallelism
	out.Completions = in.Completions
	out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
	// unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector
	if in.Selector != nil {
		out.Selector = new(LabelSelector)

@@ -370,24 +351,9 @@ func Convert_v1beta1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*JobSpec))(in)
	}
	if in.Parallelism != nil {
		out.Parallelism = new(int)
		*out.Parallelism = int(*in.Parallelism)
	} else {
		out.Parallelism = nil
	}
	if in.Completions != nil {
		out.Completions = new(int)
		*out.Completions = int(*in.Completions)
	} else {
		out.Completions = nil
	}
	if in.ActiveDeadlineSeconds != nil {
		out.ActiveDeadlineSeconds = new(int64)
		*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
	} else {
		out.ActiveDeadlineSeconds = nil
	}
	out.Parallelism = in.Parallelism
	out.Completions = in.Completions
	out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds
	// unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector
	if in.Selector != nil {
		out.Selector = new(unversioned.LabelSelector)

@@ -583,7 +583,7 @@ func ValidateReplicaSetSpec(spec *extensions.ReplicaSetSpec, fldPath *field.Path
}

// Validates the given template and ensures that it is in accordance with the desired selector and replicas.
func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int, fldPath *field.Path) field.ErrorList {
func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if template == nil {
		allErrs = append(allErrs, field.Required(fldPath, ""))

@@ -41,7 +41,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
				Name:        "myrc",
				Subresource: "scale",
			},
			MinReplicas:    newInt(1),
			MinReplicas:    newInt32(1),
			MaxReplicas:    5,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70},
		},

@@ -57,7 +57,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
				Name:        "myrc",
				Subresource: "scale",
			},
			MinReplicas: newInt(1),
			MinReplicas: newInt32(1),
			MaxReplicas: 5,
		},
	},

@@ -75,7 +75,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
				Name:        "myrc",
				Subresource: "scale",
			},
			MinReplicas: newInt(1),
			MinReplicas: newInt32(1),
			MaxReplicas: 5,
		},
	},

@@ -95,7 +95,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault},
		Spec: extensions.HorizontalPodAutoscalerSpec{
			ScaleRef:       extensions.SubresourceReference{Name: "myrc", Subresource: "scale"},
			MinReplicas:    newInt(1),
			MinReplicas:    newInt32(1),
			MaxReplicas:    5,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70},
		},

@@ -107,7 +107,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault},
		Spec: extensions.HorizontalPodAutoscalerSpec{
			ScaleRef:       extensions.SubresourceReference{Kind: "..", Name: "myrc", Subresource: "scale"},
			MinReplicas:    newInt(1),
			MinReplicas:    newInt32(1),
			MaxReplicas:    5,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70},
		},

@@ -119,7 +119,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault},
		Spec: extensions.HorizontalPodAutoscalerSpec{
			ScaleRef:       extensions.SubresourceReference{Kind: "ReplicationController", Subresource: "scale"},
			MinReplicas:    newInt(1),
			MinReplicas:    newInt32(1),
			MaxReplicas:    5,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70},
		},

@@ -131,7 +131,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault},
		Spec: extensions.HorizontalPodAutoscalerSpec{
			ScaleRef:       extensions.SubresourceReference{Kind: "ReplicationController", Name: "..", Subresource: "scale"},
			MinReplicas:    newInt(1),
			MinReplicas:    newInt32(1),
			MaxReplicas:    5,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70},
		},

@@ -143,7 +143,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault},
		Spec: extensions.HorizontalPodAutoscalerSpec{
			ScaleRef:       extensions.SubresourceReference{Kind: "ReplicationController", Name: "myrc", Subresource: ""},
			MinReplicas:    newInt(1),
			MinReplicas:    newInt32(1),
			MaxReplicas:    5,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70},
		},

@@ -155,7 +155,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault},
		Spec: extensions.HorizontalPodAutoscalerSpec{
			ScaleRef:       extensions.SubresourceReference{Kind: "ReplicationController", Name: "myrc", Subresource: ".."},
			MinReplicas:    newInt(1),
			MinReplicas:    newInt32(1),
			MaxReplicas:    5,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70},
		},

@@ -167,7 +167,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault},
		Spec: extensions.HorizontalPodAutoscalerSpec{
			ScaleRef:       extensions.SubresourceReference{Kind: "ReplicationController", Name: "myrc", Subresource: "randomsubresource"},
			MinReplicas:    newInt(1),
			MinReplicas:    newInt32(1),
			MaxReplicas:    5,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70},
		},

@@ -184,7 +184,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
			ScaleRef: extensions.SubresourceReference{
				Subresource: "scale",
			},
			MinReplicas: newInt(-1),
			MinReplicas: newInt32(-1),
			MaxReplicas: 5,
		},
	},

@@ -200,7 +200,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
			ScaleRef: extensions.SubresourceReference{
				Subresource: "scale",
			},
			MinReplicas: newInt(7),
			MinReplicas: newInt32(7),
			MaxReplicas: 5,
		},
	},

@@ -216,7 +216,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
			ScaleRef: extensions.SubresourceReference{
				Subresource: "scale",
			},
			MinReplicas:    newInt(1),
			MinReplicas:    newInt32(1),
			MaxReplicas:    5,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: -70},
		},

@@ -238,7 +238,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
				Name:        "myrc",
				Subresource: "scale",
			},
			MinReplicas: newInt(1),
			MinReplicas: newInt32(1),
			MaxReplicas: 5,
		},
	},

@@ -259,7 +259,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
				Name:        "myrc",
				Subresource: "scale",
			},
			MinReplicas: newInt(1),
			MinReplicas: newInt32(1),
			MaxReplicas: 5,
		},
	},

@@ -280,7 +280,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
				Name:        "myrc",
				Subresource: "scale",
			},
			MinReplicas: newInt(1),
			MinReplicas: newInt32(1),
			MaxReplicas: 5,
		},
	},

@@ -301,7 +301,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
				Name:        "myrc",
				Subresource: "scale",
			},
			MinReplicas: newInt(1),
			MinReplicas: newInt32(1),
			MaxReplicas: 5,
		},
	},

@@ -1709,8 +1709,8 @@ func TestValidateReplicaSet(t *testing.T) {
	}
}

func newInt(val int) *int {
	p := new(int)
func newInt32(val int32) *int32 {
	p := new(int32)
	*p = val
	return p
}

@@ -236,7 +236,7 @@ func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, err
		event.Name = lastObservation.name
		event.ResourceVersion = lastObservation.resourceVersion
		event.FirstTimestamp = lastObservation.firstTimestamp
		event.Count = lastObservation.count + 1
		event.Count = int32(lastObservation.count) + 1

		eventCopy2 := *event
		eventCopy2.Count = 0

@@ -251,7 +251,7 @@ func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, err
	e.cache.Add(
		key,
		eventLog{
			count:           event.Count,
			count:           int(event.Count),
			firstTimestamp:  event.FirstTimestamp,
			name:            event.Name,
			resourceVersion: event.ResourceVersion,

@@ -269,7 +269,7 @@ func (e *eventLogger) updateState(event *api.Event) {
	e.cache.Add(
		key,
		eventLog{
			count:           event.Count,
			count:           int(event.Count),
			firstTimestamp:  event.FirstTimestamp,
			name:            event.Name,
			resourceVersion: event.ResourceVersion,

@@ -87,7 +87,7 @@ func makeSimilarEvents(num int, template api.Event, messagePrefix string) []api.
}

func setCount(event api.Event, count int) api.Event {
	event.Count = count
	event.Count = int32(count)
	return event
}

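Sketch of the boundary conversions above (stand-in types): the private event cache keeps counting in int while api.Event.Count is now int32, so each direction converts once at the edge.

package main

import "fmt"

type apiEvent struct{ Count int32 } // stand-in for api.Event
type eventLog struct{ count int }   // cache entry keeps a plain int

func main() {
	last := eventLog{count: 4}
	ev := apiEvent{Count: int32(last.count) + 1} // cache -> API
	cached := eventLog{count: int(ev.Count)}     // API -> cache
	fmt.Println(ev.Count, cached.count)          // 5 5
}
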
@@ -717,8 +717,8 @@ func loadBalancerPortRange(ports []api.ServicePort) (string, error) {
		return "", fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(ports[0].Protocol))
	}

	minPort := 65536
	maxPort := 0
	minPort := int32(65536)
	maxPort := int32(0)
	for i := range ports {
		if ports[i].Port < minPort {
			minPort = ports[i].Port

@@ -776,7 +776,7 @@ func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress st
	// Make sure the allowed ports match.
	allowedPorts := make([]string, len(ports))
	for ix := range ports {
		allowedPorts[ix] = strconv.Itoa(ports[ix].Port)
		allowedPorts[ix] = strconv.Itoa(int(ports[ix].Port))
	}
	if !slicesEqual(allowedPorts, fw.Allowed[0].Ports) {
		return true, true, nil

@@ -910,7 +910,7 @@ func (gce *GCECloud) updateFirewall(name, region, desc string, sourceRanges nets
func (gce *GCECloud) firewallObject(name, region, desc string, sourceRanges netsets.IPNet, ports []api.ServicePort, hosts []*gceInstance) (*compute.Firewall, error) {
	allowedPorts := make([]string, len(ports))
	for ix := range ports {
		allowedPorts[ix] = strconv.Itoa(ports[ix].Port)
		allowedPorts[ix] = strconv.Itoa(int(ports[ix].Port))
	}
	hostTags, err := gce.computeHostTags(hosts)
	if err != nil {

@@ -1248,7 +1248,7 @@ func (gce *GCECloud) CreateFirewall(name, desc string, sourceRanges netsets.IPNe
	// if UDP ports are required. This means the method signature will change,
	// forcing downstream clients to refactor interfaces.
	for _, p := range ports {
		svcPorts = append(svcPorts, api.ServicePort{Port: int(p), Protocol: api.ProtocolTCP})
		svcPorts = append(svcPorts, api.ServicePort{Port: int32(p), Protocol: api.ProtocolTCP})
	}
	hosts, err := gce.getInstancesByNames(hostNames)
	if err != nil {

@@ -1282,7 +1282,7 @@ func (gce *GCECloud) UpdateFirewall(name, desc string, sourceRanges netsets.IPNe
	// if UDP ports are required. This means the method signature will change,
	// forcing downstream clients to refactor interfaces.
	for _, p := range ports {
		svcPorts = append(svcPorts, api.ServicePort{Port: int(p), Protocol: api.ProtocolTCP})
		svcPorts = append(svcPorts, api.ServicePort{Port: int32(p), Protocol: api.ProtocolTCP})
	}
	hosts, err := gce.getInstancesByNames(hostNames)
	if err != nil {

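Sketch of the strconv pattern used above: strconv.Itoa accepts only int, so each int32 port is converted at the call; strconv.FormatInt is the usual alternative when widening to int64 instead.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	var port int32 = 443 // api.ServicePort.Port is now int32
	fmt.Println(strconv.Itoa(int(port)))            // "443"
	fmt.Println(strconv.FormatInt(int64(port), 10)) // "443" as well
}
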
@@ -740,7 +740,7 @@ func (lb *LoadBalancer) EnsureLoadBalancer(apiService *api.Service, hosts []stri

	_, err = members.Create(lb.network, members.CreateOpts{
		PoolID:       pool.ID,
		ProtocolPort: ports[0].NodePort, //TODO: need to handle multi-port
		ProtocolPort: int(ports[0].NodePort), //TODO: need to handle multi-port
		Address:      addr,
	}).Extract()
	if err != nil {

@@ -774,7 +774,7 @@ func (lb *LoadBalancer) EnsureLoadBalancer(apiService *api.Service, hosts []stri
		Name:         name,
		Description:  fmt.Sprintf("Kubernetes external service %s", name),
		Protocol:     "TCP",
		ProtocolPort: ports[0].Port, //TODO: need to handle multi-port
		ProtocolPort: int(ports[0].Port), //TODO: need to handle multi-port
		PoolID:       pool.ID,
		SubnetID:     lb.opts.SubnetId,
		Persistence:  persistence,

@@ -576,7 +576,7 @@ func podReadyTime(pod *api.Pod) unversioned.Time {
func maxContainerRestarts(pod *api.Pod) int {
	maxRestarts := 0
	for _, c := range pod.Status.ContainerStatuses {
		maxRestarts = integer.IntMax(maxRestarts, c.RestartCount)
		maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
	}
	return maxRestarts
}

@@ -60,7 +60,7 @@ func newReplicationController(replicas int) *api.ReplicationController {
			ResourceVersion: "18",
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Replicas: int32(replicas),
			Selector: map[string]string{"foo": "bar"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{

@@ -553,15 +553,15 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) {
}

func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error {
	if ds.Status.DesiredNumberScheduled == desiredNumberScheduled && ds.Status.CurrentNumberScheduled == currentNumberScheduled && ds.Status.NumberMisscheduled == numberMisscheduled {
	if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled && int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled && int(ds.Status.NumberMisscheduled) == numberMisscheduled {
		return nil
	}

	var updateErr, getErr error
	for i := 0; i <= StatusUpdateRetries; i++ {
		ds.Status.DesiredNumberScheduled = desiredNumberScheduled
		ds.Status.CurrentNumberScheduled = currentNumberScheduled
		ds.Status.NumberMisscheduled = numberMisscheduled
		ds.Status.DesiredNumberScheduled = int32(desiredNumberScheduled)
		ds.Status.CurrentNumberScheduled = int32(currentNumberScheduled)
		ds.Status.NumberMisscheduled = int32(numberMisscheduled)

		_, updateErr = dsClient.UpdateStatus(ds)
		if updateErr == nil {

@@ -1065,12 +1065,12 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
}

// cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int) ([]*extensions.ReplicaSet, int, error) {
func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) {
	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
	// Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order
	// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
	// be deleted first and won't increase unavailability.
	totalScaledDown := 0
	totalScaledDown := int32(0)
	for i, targetRS := range oldRSs {
		if totalScaledDown >= maxCleanupCount {
			break

@@ -1088,7 +1088,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
			continue
		}

		scaledDownCount := integer.IntMin(maxCleanupCount-totalScaledDown, targetRS.Spec.Replicas-readyPodCount)
		scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(targetRS.Spec.Replicas-readyPodCount)))
		newReplicasCount := targetRS.Spec.Replicas - scaledDownCount
		if newReplicasCount > targetRS.Spec.Replicas {
			return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount)

@@ -1105,7 +1105,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re

// scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
// Need to check maxUnavailable to ensure availability.
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int, error) {
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) {
	_, maxUnavailable, err := deploymentutil.ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
	if err != nil {
		return 0, err

@@ -1126,7 +1126,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [

	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))

	totalScaledDown := 0
	totalScaledDown := int32(0)
	totalScaleDownCount := readyPodCount - minAvailable
	for _, targetRS := range oldRSs {
		if totalScaledDown >= totalScaleDownCount {

@@ -1138,7 +1138,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
			continue
		}
		// Scale down.
		scaleDownCount := integer.IntMin(targetRS.Spec.Replicas, totalScaleDownCount-totalScaledDown)
		scaleDownCount := int32(integer.IntMin(int(targetRS.Spec.Replicas), int(totalScaleDownCount-totalScaledDown)))
		newReplicasCount := targetRS.Spec.Replicas - scaleDownCount
		if newReplicasCount > targetRS.Spec.Replicas {
			return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount)

@@ -1180,7 +1180,7 @@ func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extension
}

func (dc *DeploymentController) cleanupOldReplicaSets(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error {
	diff := len(oldRSs) - *deployment.Spec.RevisionHistoryLimit
	diff := int32(len(oldRSs)) - *deployment.Spec.RevisionHistoryLimit
	if diff <= 0 {
		return nil
	}

@@ -1189,7 +1189,7 @@ func (dc *DeploymentController) cleanupOldReplicaSets(oldRSs []*extensions.Repli

	var errList []error
	// TODO: This should be parallelized.
	for i := 0; i < diff; i++ {
	for i := int32(0); i < diff; i++ {
		rs := oldRSs[i]
		// Avoid deleting replica sets with non-zero replica counts.
		if rs.Status.Replicas != 0 || rs.Spec.Replicas != 0 || rs.Generation > rs.Status.ObservedGeneration {

@@ -1223,7 +1223,7 @@ func (dc *DeploymentController) updateDeploymentStatus(allRSs []*extensions.Repl
	return err
}

func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (totalActualReplicas, updatedReplicas, availableReplicas, unavailableReplicas int, err error) {
func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (totalActualReplicas, updatedReplicas, availableReplicas, unavailableReplicas int32, err error) {
	totalActualReplicas = deploymentutil.GetActualReplicaCountForReplicaSets(allRSs)
	updatedReplicas = deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS})
	minReadySeconds := deployment.Spec.MinReadySeconds

@@ -1237,7 +1237,7 @@ func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet,
	return
}

func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) {
func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) {
	// No need to scale
	if rs.Spec.Replicas == newScale {
		return false, rs, nil

@@ -1257,7 +1257,7 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.Rep
	return true, newRS, err
}

func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int) (*extensions.ReplicaSet, error) {
func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32) (*extensions.ReplicaSet, error) {
	// TODO: Using client for now, update to use store when it is ready.
	// NOTE: This mutates the ReplicaSet passed in. Not sure if that's a good idea.
	rs.Spec.Replicas = newScale

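Sketch of the min-of-int32 round trip above (intMin is a local stand-in for the util package's integer.IntMin, which still operates on int); a dedicated int32 helper would avoid the double conversion.

package main

import "fmt"

func intMin(a, b int) int { // stand-in for integer.IntMin
	if b < a {
		return b
	}
	return a
}

func min32(a, b int32) int32 { // possible int32-native alternative
	if b < a {
		return b
	}
	return a
}

func main() {
	var budget, excess int32 = 5, 3
	fmt.Println(int32(intMin(int(budget), int(excess)))) // 3, as in the diff
	fmt.Println(min32(budget, excess))                   // 3, without casts
}
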
@@ -39,7 +39,7 @@ func rs(name string, replicas int, selector map[string]string) *exp.ReplicaSet {
			Name: name,
		},
		Spec: exp.ReplicaSetSpec{
			Replicas: replicas,
			Replicas: int32(replicas),
			Selector: &unversioned.LabelSelector{MatchLabels: selector},
			Template: api.PodTemplateSpec{},
		},

@@ -49,7 +49,7 @@ func rs(name string, replicas int, selector map[string]string) *exp.ReplicaSet {
func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *exp.ReplicaSet {
	rs := rs(name, specReplicas, selector)
	rs.Status = exp.ReplicaSetStatus{
		Replicas: statusReplicas,
		Replicas: int32(statusReplicas),
	}
	return rs
}

@@ -60,7 +60,7 @@ func deployment(name string, replicas int, maxSurge, maxUnavailable intstr.IntOr
			Name: name,
		},
		Spec: exp.DeploymentSpec{
			Replicas: replicas,
			Replicas: int32(replicas),
			Strategy: exp.DeploymentStrategy{
				Type: exp.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &exp.RollingUpdateDeployment{

@@ -75,6 +75,11 @@ func deployment(name string, replicas int, maxSurge, maxUnavailable intstr.IntOr
var alwaysReady = func() bool { return true }

func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment {
	var v *int32
	if revisionHistoryLimit != nil {
		v = new(int32)
		*v = int32(*revisionHistoryLimit)
	}
	d := exp.Deployment{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{

@@ -88,7 +93,7 @@ func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment {
				Type:          exp.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &exp.RollingUpdateDeployment{},
			},
			Replicas: replicas,
			Replicas: int32(replicas),
			Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{

@@ -105,7 +110,7 @@ func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment {
				},
			},
		},
		RevisionHistoryLimit: revisionHistoryLimit,
		RevisionHistoryLimit: v,
	},
}
return &d

@@ -118,7 +123,7 @@ func newReplicaSet(d *exp.Deployment, name string, replicas int) *exp.ReplicaSet
			Namespace: api.NamespaceDefault,
		},
		Spec: exp.ReplicaSetSpec{
			Replicas: replicas,
			Replicas: int32(replicas),
			Template: d.Spec.Template,
		},
	}

@@ -211,7 +216,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
			continue
		}
		updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*exp.ReplicaSet)
		if e, a := test.expectedNewReplicas, updated.Spec.Replicas; e != a {
		if e, a := test.expectedNewReplicas, int(updated.Spec.Replicas); e != a {
			t.Errorf("expected update to %d replicas, got %d", e, a)
		}
	}

@@ -470,12 +475,12 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
			client:        &fakeClientset,
			eventRecorder: &record.FakeRecorder{},
		}
		_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, test.maxCleanupCount)
		_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, int32(test.maxCleanupCount))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if cleanupCount != test.cleanupCountExpected {
		if int(cleanupCount) != test.cleanupCountExpected {
			t.Errorf("expected %v unhealthy replicas been cleaned up, got %v", test.cleanupCountExpected, cleanupCount)
			continue
		}

@@ -598,7 +603,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
			continue
		}
		updated := updateAction.GetObject().(*exp.ReplicaSet)
		if e, a := test.expectedOldReplicas, updated.Spec.Replicas; e != a {
		if e, a := test.expectedOldReplicas, int(updated.Spec.Replicas); e != a {
			t.Errorf("expected update to %d replicas, got %d", e, a)
		}
	}

@@ -378,7 +378,7 @@ func (e *EndpointController) syncService(key string) {
				continue
			}

			epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto}
			epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
			epa := api.EndpointAddress{
				IP: pod.Status.PodIP,
				TargetRef: &api.ObjectReference{

@@ -65,7 +65,7 @@ func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotRea
		}
		for j := 0; j < nPorts; j++ {
			p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports,
				api.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: 8080 + j})
				api.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: int32(8080 + j)})
		}
		store.Add(p)
	}

@@ -339,7 +339,7 @@ func (jm *JobController) syncJob(key string) error {
	}

	activePods := controller.FilterActivePods(podList.Items)
	active := len(activePods)
	active := int32(len(activePods))
	succeeded, failed := getStatus(podList.Items)
	conditions := len(job.Status.Conditions)
	if job.Status.StartTime == nil {

@@ -358,9 +358,9 @@ func (jm *JobController) syncJob(key string) error {
		// some sort of solution to above problem.
		// kill remaining active pods
		wait := sync.WaitGroup{}
		wait.Add(active)
		for i := 0; i < active; i++ {
			go func(ix int) {
		wait.Add(int(active))
		for i := int32(0); i < active; i++ {
			go func(ix int32) {
				defer wait.Done()
				if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, &job); err != nil {
					defer utilruntime.HandleError(err)

@@ -449,17 +449,17 @@ func newCondition(conditionType batch.JobConditionType, reason, message string)
}

// getStatus returns the number of succeeded and failed pods running a job
func getStatus(pods []api.Pod) (succeeded, failed int) {
	succeeded = filterPods(pods, api.PodSucceeded)
	failed = filterPods(pods, api.PodFailed)
func getStatus(pods []api.Pod) (succeeded, failed int32) {
	succeeded = int32(filterPods(pods, api.PodSucceeded))
	failed = int32(filterPods(pods, api.PodFailed))
	return
}

// manageJob is the core method responsible for managing the number of running
// pods according to what is specified in the job.Spec.
func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *batch.Job) int {
func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int32, job *batch.Job) int32 {
	var activeLock sync.Mutex
	active := len(activePods)
	active := int32(len(activePods))
	parallelism := *job.Spec.Parallelism
	jobKey, err := controller.KeyFunc(job)
	if err != nil {

@@ -469,7 +469,7 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ba

	if active > parallelism {
		diff := active - parallelism
		jm.expectations.ExpectDeletions(jobKey, diff)
		jm.expectations.ExpectDeletions(jobKey, int(diff))
		glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff)
		// Sort the pods in the order such that not-ready < ready, unscheduled
		// < scheduled, and pending < running. This ensures that we delete pods

@@ -478,9 +478,9 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ba

		active -= diff
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
		wait.Add(int(diff))
		for i := int32(0); i < diff; i++ {
			go func(ix int32) {
				defer wait.Done()
				if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, job); err != nil {
					defer utilruntime.HandleError(err)

@@ -495,7 +495,7 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ba
		wait.Wait()

	} else if active < parallelism {
		wantActive := 0
		wantActive := int32(0)
		if job.Spec.Completions == nil {
			// Job does not specify a number of completions. Therefore, number active
			// should be equal to parallelism, unless the job has seen at least

@@ -518,13 +518,13 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ba
			glog.Errorf("More active than wanted: job %q, want %d, have %d", jobKey, wantActive, active)
			diff = 0
		}
		jm.expectations.ExpectCreations(jobKey, diff)
		jm.expectations.ExpectCreations(jobKey, int(diff))
		glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff)

		active += diff
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
		wait.Add(int(diff))
		for i := int32(0); i < diff; i++ {
			go func() {
				defer wait.Done()
				if err := jm.podControl.CreatePods(job.Namespace, &job.Spec.Template, job); err != nil {

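Sketch of the concurrency pattern above: sync.WaitGroup.Add still takes an int, so the int32 diff is converted once, while the loop counter itself becomes int32 so the goroutines can receive it without further casts.

package main

import (
	"fmt"
	"sync"
)

func main() {
	diff := int32(3) // number of pods to delete, now tracked as int32
	wait := sync.WaitGroup{}
	wait.Add(int(diff)) // WaitGroup.Add takes int
	for i := int32(0); i < diff; i++ {
		go func(ix int32) {
			defer wait.Done()
			fmt.Println("would delete pod", ix)
		}(i)
	}
	wait.Wait()
}
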
@@ -37,7 +37,7 @@ import (

var alwaysReady = func() bool { return true }

func newJob(parallelism, completions int) *batch.Job {
func newJob(parallelism, completions int32) *batch.Job {
	j := &batch.Job{
		ObjectMeta: api.ObjectMeta{
			Name: "foobar",

@@ -86,9 +86,9 @@ func getKey(job *batch.Job, t *testing.T) string {
}

// create count pods with the given phase for the given job
func newPodList(count int, status api.PodPhase, job *batch.Job) []api.Pod {
func newPodList(count int32, status api.PodPhase, job *batch.Job) []api.Pod {
	pods := []api.Pod{}
	for i := 0; i < count; i++ {
	for i := int32(0); i < count; i++ {
		newPod := api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: fmt.Sprintf("pod-%v", rand.String(10)),

@@ -105,21 +105,21 @@ func newPodList(count int, status api.PodPhase, job *batch.Job) []api.Pod {
func TestControllerSyncJob(t *testing.T) {
	testCases := map[string]struct {
		// job setup
		parallelism int
		completions int
		parallelism int32
		completions int32

		// pod setup
		podControllerError error
		activePods         int
		succeededPods      int
		failedPods         int
		activePods         int32
		succeededPods      int32
		failedPods         int32

		// expectations
		expectedCreations int
		expectedDeletions int
		expectedActive    int
		expectedSucceeded int
		expectedFailed    int
		expectedCreations int32
		expectedDeletions int32
		expectedActive    int32
		expectedSucceeded int32
		expectedFailed    int32
		expectedComplete  bool
	}{
		"job start": {

@@ -237,10 +237,10 @@ func TestControllerSyncJob(t *testing.T) {
		}

		// validate created/deleted pods
		if len(fakePodControl.Templates) != tc.expectedCreations {
		if int32(len(fakePodControl.Templates)) != tc.expectedCreations {
			t.Errorf("%s: unexpected number of creates. Expected %d, saw %d\n", name, tc.expectedCreations, len(fakePodControl.Templates))
		}
		if len(fakePodControl.DeletePodName) != tc.expectedDeletions {
		if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions {
			t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName))
		}
		// validate status

@@ -266,21 +266,21 @@ func TestControllerSyncJob(t *testing.T) {
func TestSyncJobPastDeadline(t *testing.T) {
	testCases := map[string]struct {
		// job setup
		parallelism           int
		completions           int
		parallelism           int32
		completions           int32
		activeDeadlineSeconds int64
		startTime             int64

		// pod setup
		activePods    int
		succeededPods int
		failedPods    int
		activePods    int32
		succeededPods int32
		failedPods    int32

		// expectations
		expectedDeletions int
		expectedActive    int
		expectedSucceeded int
		expectedFailed    int
		expectedDeletions int32
		expectedActive    int32
		expectedSucceeded int32
		expectedFailed    int32
	}{
		"activeDeadlineSeconds less than single pod execution": {
			1, 1, 10, 15,

@@ -335,10 +335,10 @@ func TestSyncJobPastDeadline(t *testing.T) {
		}

		// validate created/deleted pods
		if len(fakePodControl.Templates) != 0 {
		if int32(len(fakePodControl.Templates)) != 0 {
			t.Errorf("%s: unexpected number of creates. Expected 0, saw %d\n", name, len(fakePodControl.Templates))
		}
		if len(fakePodControl.DeletePodName) != tc.expectedDeletions {
		if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions {
			t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName))
		}
		// validate status

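Sketch of the comparison fix in these tests: len always yields an int, so one side of each comparison must be converted now that the expectation fields are int32; converting the length is safe here since pod counts stay far below the int32 range.

package main

import "fmt"

func main() {
	templates := []string{"pod-a", "pod-b"}
	expectedCreations := int32(2)
	fmt.Println(int32(len(templates)) == expectedCreations) // true
}
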
@@ -128,8 +128,8 @@ func (a *HorizontalController) Run(stopCh <-chan struct{}) {
	glog.Infof("Shutting down HPA Controller")
}

func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int, *int, time.Time, error) {
	targetUtilization := defaultTargetCPUUtilizationPercentage
func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int32, *int32, time.Time, error) {
	targetUtilization := int32(defaultTargetCPUUtilizationPercentage)
	if hpa.Spec.CPUUtilization != nil {
		targetUtilization = hpa.Spec.CPUUtilization.TargetPercentage
	}

@@ -155,11 +155,13 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.
		return 0, nil, time.Time{}, fmt.Errorf("failed to get CPU utilization: %v", err)
	}

	usageRatio := float64(*currentUtilization) / float64(targetUtilization)
	utilization := int32(*currentUtilization)

	usageRatio := float64(utilization) / float64(targetUtilization)
	if math.Abs(1.0-usageRatio) > tolerance {
		return int(math.Ceil(usageRatio * float64(currentReplicas))), currentUtilization, timestamp, nil
		return int32(math.Ceil(usageRatio * float64(currentReplicas))), &utilization, timestamp, nil
	} else {
		return currentReplicas, currentUtilization, timestamp, nil
		return currentReplicas, &utilization, timestamp, nil
	}
}

@@ -169,7 +171,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.
// status string (also json-serialized extensions.CustomMetricsCurrentStatusList),
// last timestamp of the metrics involved in computations or error, if occurred.
func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale,
	cmAnnotation string) (replicas int, metric string, status string, timestamp time.Time, err error) {
	cmAnnotation string) (replicas int32, metric string, status string, timestamp time.Time, err error) {

	currentReplicas := scale.Status.Replicas
	replicas = 0

@@ -216,9 +218,9 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.H
		floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0
		usageRatio := *value / floatTarget

		replicaCountProposal := 0
		replicaCountProposal := int32(0)
		if math.Abs(1.0-usageRatio) > tolerance {
			replicaCountProposal = int(math.Ceil(usageRatio * float64(currentReplicas)))
			replicaCountProposal = int32(math.Ceil(usageRatio * float64(currentReplicas)))
		} else {
			replicaCountProposal = currentReplicas
		}

@@ -254,16 +256,16 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod
	}
	currentReplicas := scale.Status.Replicas

	cpuDesiredReplicas := 0
	var cpuCurrentUtilization *int = nil
	cpuDesiredReplicas := int32(0)
	var cpuCurrentUtilization *int32 = nil
	cpuTimestamp := time.Time{}

	cmDesiredReplicas := 0
	cmDesiredReplicas := int32(0)
	cmMetric := ""
	cmStatus := ""
	cmTimestamp := time.Time{}

	desiredReplicas := 0
	desiredReplicas := int32(0)
	rescaleReason := ""
	timestamp := time.Now()

@@ -347,7 +349,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod
	return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale)
}

func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, timestamp time.Time) bool {
func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool {
	if desiredReplicas != currentReplicas {
		// Going down only if the usageRatio dropped significantly below the target
		// and there was no rescaling in the last downscaleForbiddenWindow.

@@ -368,14 +370,14 @@ func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desir
	return false
}

func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas int) {
func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas int32) {
	err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false)
	if err != nil {
		glog.Errorf("%v", err)
	}
}

func (a *HorizontalController) updateStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, cpuCurrentUtilization *int, cmStatus string, rescale bool) error {
func (a *HorizontalController) updateStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, cpuCurrentUtilization *int32, cmStatus string, rescale bool) error {
	hpa.Status = extensions.HorizontalPodAutoscalerStatus{
		CurrentReplicas: currentReplicas,
		DesiredReplicas: desiredReplicas,

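Sketch of the autoscaler arithmetic above: the usage ratio is computed in float64 and the replica proposal rounded up, now landing in an int32 to match the Scale status fields.

package main

import (
	"fmt"
	"math"
)

func main() {
	var currentUtilization, targetUtilization int32 = 90, 50 // CPU percentages
	currentReplicas := int32(2)
	usageRatio := float64(currentUtilization) / float64(targetUtilization)
	proposal := int32(math.Ceil(usageRatio * float64(currentReplicas)))
	fmt.Println(proposal) // 4
}
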
@@ -67,14 +67,14 @@ type fakeResource struct {

type testCase struct {
	sync.Mutex
	minReplicas     int
	maxReplicas     int
	initialReplicas int
	desiredReplicas int
	minReplicas     int32
	maxReplicas     int32
	initialReplicas int32
	desiredReplicas int32

	// CPU target utilization as a percentage of the requested resources.
	CPUTarget           int
	CPUCurrent          int
	CPUTarget           int32
	CPUCurrent          int32
	verifyCPUCurrent    bool
	reportedLevels      []uint64
	reportedCPURequests []resource.Quantity

@@ -103,7 +103,7 @@ func (tc *testCase) computeCPUCurrent() {
	for _, req := range tc.reportedCPURequests {
		requested += int(req.MilliValue())
	}
	tc.CPUCurrent = 100 * reported / requested
	tc.CPUCurrent = int32(100 * reported / requested)
}

func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {

@@ -421,7 +421,7 @@ func (rsc *ReplicaSetController) worker() {

// manageReplicas checks and updates replicas for the given ReplicaSet.
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *extensions.ReplicaSet) {
	diff := len(filteredPods) - rs.Spec.Replicas
	diff := len(filteredPods) - int(rs.Spec.Replicas)
	rsKey, err := controller.KeyFunc(rs)
	if err != nil {
		glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)

@@ -66,7 +66,7 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl
			ResourceVersion: "18",
		},
		Spec: extensions.ReplicaSetSpec{
			Replicas: replicas,
			Replicas: int32(replicas),
			Selector: &unversioned.LabelSelector{MatchLabels: selectorMap},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{

@@ -237,7 +237,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
	labelMap := map[string]string{"foo": "bar"}
	rs := newReplicaSet(activePods, labelMap)
	manager.rsStore.Store.Add(rs)
	rs.Status = extensions.ReplicaSetStatus{Replicas: activePods}
	rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods)}
	newPodList(manager.podStore.Store, activePods, api.PodRunning, labelMap, rs, "pod")

	fakePodControl := controller.FakePodControl{}

@@ -643,7 +643,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
	// returned a ReplicaSet with replicas=1.
	if c, ok := action.GetObject().(*extensions.ReplicaSet); !ok {
		t.Errorf("Expected a ReplicaSet as the argument to update, got %T", c)
	} else if c.Status.Replicas != numReplicas {
	} else if int(c.Status.Replicas) != numReplicas {
		t.Errorf("Expected update for ReplicaSet to contain replicas %v, got %v instead",
			numReplicas, c.Status.Replicas)
	}

@@ -669,7 +669,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
	rsSpec := newReplicaSet(numReplicas, labelMap)
	manager.rsStore.Store.Add(rsSpec)

	expectedPods := 0
	expectedPods := int32(0)
	pods := newPodList(nil, numReplicas, api.PodPending, labelMap, rsSpec, "pod")

	rsKey, err := controller.KeyFunc(rsSpec)

@@ -678,7 +678,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
	}

	// Size up the controller, then size it down, and confirm the expected create/delete pattern
	for _, replicas := range []int{numReplicas, 0} {
	for _, replicas := range []int32{int32(numReplicas), 0} {

		rsSpec.Spec.Replicas = replicas
		manager.rsStore.Store.Add(rsSpec)

@@ -688,21 +688,21 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)

		// The store accrues active pods. It's also used by the ReplicaSet to determine how many
		// replicas to create.
		activePods := len(manager.podStore.Store.List())
		activePods := int32(len(manager.podStore.Store.List()))
		if replicas != 0 {
			// This is the number of pods currently "in flight". They were created by the
			// ReplicaSet controller above, which then puts the ReplicaSet to sleep till
			// all of them have been observed.
			expectedPods = replicas - activePods
			if expectedPods > burstReplicas {
				expectedPods = burstReplicas
			if expectedPods > int32(burstReplicas) {
				expectedPods = int32(burstReplicas)
			}
			// This validates the ReplicaSet manager sync actually created pods
			validateSyncReplicaSet(t, &fakePodControl, expectedPods, 0)
			validateSyncReplicaSet(t, &fakePodControl, int(expectedPods), 0)

			// This simulates the watch events for all but 1 of the expected pods.
			// None of these should wake the controller because it has expectations==BurstReplicas.
			for i := 0; i < expectedPods-1; i++ {
			for i := int32(0); i < expectedPods-1; i++ {
				manager.podStore.Store.Add(&pods.Items[i])
				manager.addPod(&pods.Items[i])
			}

@@ -716,10 +716,10 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
			}
		} else {
			expectedPods = (replicas - activePods) * -1
			if expectedPods > burstReplicas {
				expectedPods = burstReplicas
			if expectedPods > int32(burstReplicas) {
				expectedPods = int32(burstReplicas)
			}
			validateSyncReplicaSet(t, &fakePodControl, 0, expectedPods)
			validateSyncReplicaSet(t, &fakePodControl, 0, int(expectedPods))

			// To accurately simulate a watch we must delete the exact pods
			// the rs is waiting for.

@@ -782,12 +782,12 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
		}

		// Confirm that we've created the right number of replicas
		activePods := len(manager.podStore.Store.List())
		activePods := int32(len(manager.podStore.Store.List()))
		if activePods != rsSpec.Spec.Replicas {
			t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods)
		}
		// Replenish the pod list, since we cut it down sizing up
		pods = newPodList(nil, replicas, api.PodRunning, labelMap, rsSpec, "pod")
		pods = newPodList(nil, int(replicas), api.PodRunning, labelMap, rsSpec, "pod")
	}
}

@@ -31,8 +31,8 @@ func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.Repli
	// This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since
	// we do a periodic relist every 30s. If the generations differ but the replicas are
	// the same, a caller might've resized to the same replica count.
	if rs.Status.Replicas == numReplicas &&
		rs.Status.FullyLabeledReplicas == numFullyLabeledReplicas &&
	if int(rs.Status.Replicas) == numReplicas &&
		int(rs.Status.FullyLabeledReplicas) == numFullyLabeledReplicas &&
		rs.Generation == rs.Status.ObservedGeneration {
		return nil
	}

@@ -49,7 +49,7 @@ func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.Repli
		fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, numFullyLabeledReplicas) +
		fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, generation))

	rs.Status = extensions.ReplicaSetStatus{Replicas: numReplicas, FullyLabeledReplicas: numFullyLabeledReplicas, ObservedGeneration: generation}
	rs.Status = extensions.ReplicaSetStatus{Replicas: int32(numReplicas), FullyLabeledReplicas: int32(numFullyLabeledReplicas), ObservedGeneration: generation}
	_, updateErr = rsClient.UpdateStatus(rs)
	if updateErr == nil || i >= statusUpdateRetries {
		return updateErr

@@ -429,7 +429,7 @@ func (rm *ReplicationManager) worker() {

// manageReplicas checks and updates replicas for the given replication controller.
func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) {
	diff := len(filteredPods) - rc.Spec.Replicas
	diff := len(filteredPods) - int(rc.Spec.Replicas)
	rcKey, err := controller.KeyFunc(rc)
	if err != nil {
		glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)

@@ -65,7 +65,7 @@ func newReplicationController(replicas int) *api.ReplicationController {
			ResourceVersion: "18",
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Replicas: int32(replicas),
			Selector: map[string]string{"foo": "bar"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{

@@ -231,7 +231,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
	activePods := 5
	rc := newReplicationController(activePods)
	manager.rcStore.Store.Add(rc)
	rc.Status = api.ReplicationControllerStatus{Replicas: activePods}
	rc.Status = api.ReplicationControllerStatus{Replicas: int32(activePods)}
	newPodList(manager.podStore.Store, activePods, api.PodRunning, rc, "pod")

	fakePodControl := controller.FakePodControl{}

@@ -628,7 +628,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
	// returned an rc with replicas=1.
	if c, ok := action.GetObject().(*api.ReplicationController); !ok {
		t.Errorf("Expected an rc as the argument to update, got %T", c)
	} else if c.Status.Replicas != numReplicas {
	} else if c.Status.Replicas != int32(numReplicas) {
		t.Errorf("Expected update for rc to contain replicas %v, got %v instead",
			numReplicas, c.Status.Replicas)
	}

@@ -664,7 +664,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
	// Size up the controller, then size it down, and confirm the expected create/delete pattern
	for _, replicas := range []int{numReplicas, 0} {

		controllerSpec.Spec.Replicas = replicas
		controllerSpec.Spec.Replicas = int32(replicas)
		manager.rcStore.Store.Add(controllerSpec)

		for i := 0; i < numReplicas; i += burstReplicas {

@@ -765,7 +765,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
		}

		// Confirm that we've created the right number of replicas
		activePods := len(manager.podStore.Store.List())
		activePods := int32(len(manager.podStore.Store.List()))
		if activePods != controllerSpec.Spec.Replicas {
			t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods)
		}

@@ -31,8 +31,8 @@ func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface,
	// This is the steady state. It happens when the rc doesn't have any expectations, since
	// we do a periodic relist every 30s. If the generations differ but the replicas are
	// the same, a caller might've resized to the same replica count.
	if controller.Status.Replicas == numReplicas &&
		controller.Status.FullyLabeledReplicas == numFullyLabeledReplicas &&
	if int(controller.Status.Replicas) == numReplicas &&
		int(controller.Status.FullyLabeledReplicas) == numFullyLabeledReplicas &&
		controller.Generation == controller.Status.ObservedGeneration {
		return nil
	}

@@ -49,7 +49,7 @@ func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface,
		fmt.Sprintf("fullyLabeledReplicas %d->%d, ", controller.Status.FullyLabeledReplicas, numFullyLabeledReplicas) +
		fmt.Sprintf("sequence No: %v->%v", controller.Status.ObservedGeneration, generation))

	rc.Status = api.ReplicationControllerStatus{Replicas: numReplicas, FullyLabeledReplicas: numFullyLabeledReplicas, ObservedGeneration: generation}
	rc.Status = api.ReplicationControllerStatus{Replicas: int32(numReplicas), FullyLabeledReplicas: int32(numFullyLabeledReplicas), ObservedGeneration: generation}
	_, updateErr = rcClient.UpdateStatus(rc)
	if updateErr == nil || i >= statusUpdateRetries {
		return updateErr

@@ -97,14 +97,15 @@ func (HorizontalPodAutoscalerV1Beta1) Generate(genericParams map[string]interfac
				APIVersion:  params["scaleRef-apiVersion"],
				Subresource: scaleSubResource,
			},
			MaxReplicas: max,
			MaxReplicas: int32(max),
		},
	}
	if min > 0 {
		scaler.Spec.MinReplicas = &min
		v := int32(min)
		scaler.Spec.MinReplicas = &v
	}
	if cpu >= 0 {
		scaler.Spec.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: cpu}
		scaler.Spec.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int32(cpu)}
	}
	return &scaler, nil
}

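The temporary v above exists because Go will not take the address of a conversion expression: &int32(min) is a compile error, while a named variable is addressable. A self-contained sketch, with illustrative names:

package main

import "fmt"

type Spec struct {
	MinReplicas *int32 // optional field: a pointer distinguishes "unset" from 0
}

func main() {
	min := 2 // plain int, e.g. parsed from a flag
	var spec Spec

	// spec.MinReplicas = &int32(min) // would not compile: cannot take the address of int32(min)
	v := int32(min) // a named temporary is addressable
	spec.MinReplicas = &v
	fmt.Println(*spec.MinReplicas)
}
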
@@ -83,7 +83,7 @@ func RunClusterInfo(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command) error
				ip = ingress.Hostname
			}
			for _, port := range service.Spec.Ports {
				link += "http://" + ip + ":" + strconv.Itoa(port.Port) + " "
				link += "http://" + ip + ":" + strconv.Itoa(int(port.Port)) + " "
			}
		} else {
			if len(client.GroupVersion.Group) == 0 {

@@ -173,7 +173,7 @@ See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more d
func makePortsString(ports []api.ServicePort, useNodePort bool) string {
	pieces := make([]string, len(ports))
	for ix := range ports {
		var port int
		var port int32
		if useNodePort {
			port = ports[ix].NodePort
		} else {

@@ -743,7 +743,7 @@ func getPorts(spec api.PodSpec) []string {
	result := []string{}
	for _, container := range spec.Containers {
		for _, port := range container.Ports {
			result = append(result, strconv.Itoa(port.ContainerPort))
			result = append(result, strconv.Itoa(int(port.ContainerPort)))
		}
	}
	return result

@@ -753,7 +753,7 @@ func getPorts(spec api.PodSpec) []string {
func getServicePorts(spec api.ServiceSpec) []string {
	result := []string{}
	for _, servicePort := range spec.Ports {
		result = append(result, strconv.Itoa(servicePort.Port))
		result = append(result, strconv.Itoa(int(servicePort.Port)))
	}
	return result
}

@@ -1233,7 +1233,7 @@ func (i *IngressDescriber) describeBackend(ns string, backend *extensions.Ingres
				spName = sp.Name
			}
		case intstr.Int:
			if int(backend.ServicePort.IntVal) == sp.Port {
			if int32(backend.ServicePort.IntVal) == sp.Port {
				spName = sp.Name
			}
		}

@@ -596,7 +596,7 @@ func printPodBase(pod *api.Pod, w io.Writer, options PrintOptions) error {
	for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
		container := pod.Status.ContainerStatuses[i]

		restarts += container.RestartCount
		restarts += int(container.RestartCount)
		if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
			reason = container.State.Waiting.Reason
		} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {

@@ -1342,7 +1342,7 @@ func TestPrintDaemonSet(t *testing.T) {
}

func TestPrintJob(t *testing.T) {
	completions := 2
	completions := int32(2)
	tests := []struct {
		job    batch.Job
		expect string

@@ -114,7 +114,7 @@ type RollingUpdater struct {
	// cleanup performs post deployment cleanup tasks for newRc and oldRc.
	cleanup func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error
	// getReadyPods returns the amount of old and new ready pods.
	getReadyPods func(oldRc, newRc *api.ReplicationController) (int, int, error)
	getReadyPods func(oldRc, newRc *api.ReplicationController) (int32, int32, error)
}

// NewRollingUpdater creates a RollingUpdater from a client.

@@ -169,11 +169,12 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
		fmt.Fprintf(out, "Created %s\n", newRc.Name)
	}
	// Extract the desired replica count from the controller.
	desired, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation])
	desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation])
	if err != nil {
		return fmt.Errorf("Unable to parse annotation for %s: %s=%s",
			newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation])
	}
	desired := int32(desiredAnnotation)
	// Extract the original replica count from the old controller, adding the
	// annotation if it doesn't yet exist.
	_, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation]

@@ -185,7 +186,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
		if existing.Annotations == nil {
			existing.Annotations = map[string]string{}
		}
		existing.Annotations[originalReplicasAnnotation] = strconv.Itoa(existing.Spec.Replicas)
		existing.Annotations[originalReplicasAnnotation] = strconv.Itoa(int(existing.Spec.Replicas))
		updated, err := r.c.ReplicationControllers(existing.Namespace).Update(existing)
		if err != nil {
			return err

@@ -204,7 +205,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
	}
	// The minumum pods which must remain available througout the update
	// calculated for internal convenience.
	minAvailable := integer.IntMax(0, desired-maxUnavailable)
	minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable)))
	// If the desired new scale is 0, then the max unavailable is necessarily
	// the effective scale of the old RC regardless of the configuration
	// (equivalent to 100% maxUnavailable).

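In the minAvailable line above, integer.IntMax still takes and returns int, so the int32 operands are converted out and back within a single expression. A hypothetical int32-typed helper would avoid the round trip; this is a sketch of that alternative, not what the commit does:

// Int32Max mirrors integer.IntMax for int32 values (hypothetical helper).
func Int32Max(a, b int32) int32 {
	if a > b {
		return a
	}
	return b
}

// usage sketch: minAvailable := Int32Max(0, desired-maxUnavailable)
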
@@ -258,7 +259,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
// scaleUp scales up newRc to desired by whatever increment is possible given
// the configured surge threshold. scaleUp will safely no-op as necessary when
// it detects redundancy or other relevant conditions.
func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
	// If we're already at the desired, do nothing.
	if newRc.Spec.Replicas == desired {
		return newRc, nil

@@ -291,7 +292,7 @@ func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desire
// scaleDown scales down oldRc to 0 at whatever decrement possible given the
// thresholds defined on the config. scaleDown will safely no-op as necessary
// when it detects redundancy or other relevant conditions.
func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
	// Already scaled down; do nothing.
	if oldRc.Spec.Replicas == 0 {
		return oldRc, nil

@@ -356,10 +357,10 @@ func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, r
// readyPods returns the old and new ready counts for their pods.
// If a pod is observed as being ready, it's considered ready even
// if it later becomes notReady.
func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int, int, error) {
func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
	controllers := []*api.ReplicationController{oldRc, newRc}
	oldReady := 0
	newReady := 0
	oldReady := int32(0)
	newReady := int32(0)

	for i := range controllers {
		controller := controllers[i]

@@ -48,7 +48,7 @@ func oldRc(replicas int, original int) *api.ReplicationController {
			},
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Replicas: int32(replicas),
			Selector: map[string]string{"version": "v1"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{

@@ -58,7 +58,7 @@ func oldRc(replicas int, original int) *api.ReplicationController {
			},
		},
		Status: api.ReplicationControllerStatus{
			Replicas: replicas,
			Replicas: int32(replicas),
		},
	}
}

@@ -794,7 +794,7 @@ Scaling foo-v2 up to 2
		}
		if expected == -1 {
			t.Fatalf("unexpected scale of %s to %d", rc.Name, rc.Spec.Replicas)
		} else if e, a := expected, rc.Spec.Replicas; e != a {
		} else if e, a := expected, int(rc.Spec.Replicas); e != a {
			t.Fatalf("expected scale of %s to %d, got %d", rc.Name, e, a)
		}
		// Simulate the scale.

@@ -810,7 +810,7 @@ Scaling foo-v2 up to 2
		},
	}
	// Set up a mock readiness check which handles the test assertions.
	updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int, int, error) {
	updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
		// Return simulated readiness, and throw an error if this call has no
		// expectations defined.
		oldReady := next(&oldReady)

@@ -818,7 +818,7 @@ Scaling foo-v2 up to 2
		if oldReady == -1 || newReady == -1 {
			t.Fatalf("unexpected getReadyPods call for:\noldRc: %+v\nnewRc: %+v", oldRc, newRc)
		}
		return oldReady, newReady, nil
		return int32(oldReady), int32(newReady), nil
	}
	var buffer bytes.Buffer
	config := &RollingUpdaterConfig{

@@ -860,7 +860,7 @@ func TestUpdate_progressTimeout(t *testing.T) {
			return nil
		},
	}
	updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int, int, error) {
	updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
		// Coerce a timeout by pods never becoming ready.
		return 0, 0, nil
	}

@@ -913,7 +913,7 @@ func TestUpdate_assignOriginalAnnotation(t *testing.T) {
		cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
			return nil
		},
		getReadyPods: func(oldRc, newRc *api.ReplicationController) (int, int, error) {
		getReadyPods: func(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
			return 1, 1, nil
		},
	}

@@ -1573,8 +1573,8 @@ func TestRollingUpdater_readyPods(t *testing.T) {
		oldRc *api.ReplicationController
		newRc *api.ReplicationController
		// expectated old/new ready counts
		oldReady int
		newReady int
		oldReady int32
		newReady int32
		// pods owned by the rcs; indicate whether they're ready
		oldPods []bool
		newPods []bool

@@ -105,7 +105,7 @@ func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime
			Labels: labels,
		},
		Spec: extensions.DeploymentSpec{
			Replicas: count,
			Replicas: int32(count),
			Selector: &unversioned.LabelSelector{MatchLabels: labels},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{

@@ -605,7 +605,7 @@ func (BasicReplicationController) Generate(genericParams map[string]interface{})
			Labels: labels,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: count,
			Replicas: int32(count),
			Selector: labels,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{

@@ -680,11 +680,11 @@ func updatePodPorts(params map[string]string, podSpec *api.PodSpec) (err error)
	if port > 0 {
		podSpec.Containers[0].Ports = []api.ContainerPort{
			{
				ContainerPort: port,
				ContainerPort: int32(port),
			},
		}
		if hostPort > 0 {
			podSpec.Containers[0].Ports[0].HostPort = hostPort
			podSpec.Containers[0].Ports[0].HostPort = int32(hostPort)
		}
	}
	return nil

@@ -129,8 +129,8 @@ func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name s

// ValidateReplicationController ensures that the preconditions match. Returns nil if they are valid, an error otherwise
func (precondition *ScalePrecondition) ValidateReplicationController(controller *api.ReplicationController) error {
	if precondition.Size != -1 && controller.Spec.Replicas != precondition.Size {
		return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(controller.Spec.Replicas)}
	if precondition.Size != -1 && int(controller.Spec.Replicas) != precondition.Size {
		return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(controller.Spec.Replicas))}
	}
	if len(precondition.ResourceVersion) != 0 && controller.ResourceVersion != precondition.ResourceVersion {
		return PreconditionError{"resource version", precondition.ResourceVersion, controller.ResourceVersion}

@@ -152,7 +152,7 @@ func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, p
			return err
		}
	}
	controller.Spec.Replicas = int(newSize)
	controller.Spec.Replicas = int32(newSize)
	// TODO: do retry on 409 errors here?
	if _, err := scaler.c.ReplicationControllers(namespace).Update(controller); err != nil {
		if errors.IsInvalid(err) {

@@ -191,8 +191,8 @@ func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize

// ValidateReplicaSet ensures that the preconditions match. Returns nil if they are valid, an error otherwise
func (precondition *ScalePrecondition) ValidateReplicaSet(replicaSet *extensions.ReplicaSet) error {
	if precondition.Size != -1 && replicaSet.Spec.Replicas != precondition.Size {
		return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(replicaSet.Spec.Replicas)}
	if precondition.Size != -1 && int(replicaSet.Spec.Replicas) != precondition.Size {
		return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(replicaSet.Spec.Replicas))}
	}
	if len(precondition.ResourceVersion) != 0 && replicaSet.ResourceVersion != precondition.ResourceVersion {
		return PreconditionError{"resource version", precondition.ResourceVersion, replicaSet.ResourceVersion}

@@ -214,7 +214,7 @@ func (scaler *ReplicaSetScaler) ScaleSimple(namespace, name string, precondition
			return err
		}
	}
	rs.Spec.Replicas = int(newSize)
	rs.Spec.Replicas = int32(newSize)
	// TODO: do retry on 409 errors here?
	if _, err := scaler.c.ReplicaSets(namespace).Update(rs); err != nil {
		if errors.IsInvalid(err) {

@@ -256,8 +256,8 @@ func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error {
	if precondition.Size != -1 && job.Spec.Parallelism == nil {
		return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"}
	}
	if precondition.Size != -1 && *job.Spec.Parallelism != precondition.Size {
		return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(*job.Spec.Parallelism)}
	if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size {
		return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))}
	}
	if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion {
		return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion}

@@ -280,7 +280,7 @@ func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *Scal
			return err
		}
	}
	parallelism := int(newSize)
	parallelism := int32(newSize)
	job.Spec.Parallelism = &parallelism
	if _, err := scaler.c.Jobs(namespace).Update(job); err != nil {
		if errors.IsInvalid(err) {

@@ -319,8 +319,8 @@ func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditio

// ValidateDeployment ensures that the preconditions match. Returns nil if they are valid, an error otherwise.
func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions.Deployment) error {
	if precondition.Size != -1 && deployment.Spec.Replicas != precondition.Size {
		return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(deployment.Spec.Replicas)}
	if precondition.Size != -1 && int(deployment.Spec.Replicas) != precondition.Size {
		return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(deployment.Spec.Replicas))}
	}
	if len(precondition.ResourceVersion) != 0 && deployment.ResourceVersion != precondition.ResourceVersion {
		return PreconditionError{"resource version", precondition.ResourceVersion, deployment.ResourceVersion}

@@ -346,7 +346,7 @@ func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, precondition

	// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
	// For now I'm falling back to regular Deployment update operation.
	deployment.Spec.Replicas = int(newSize)
	deployment.Spec.Replicas = int32(newSize)
	if _, err := scaler.c.Deployments(namespace).Update(deployment); err != nil {
		if errors.IsInvalid(err) {
			return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err}

@@ -107,7 +107,7 @@ func TestReplicationControllerScale(t *testing.T) {
	if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "replicationcontrollers" || action.GetName() != name {
		t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name)
	}
	if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "replicationcontrollers" || action.GetObject().(*api.ReplicationController).Spec.Replicas != int(count) {
	if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "replicationcontrollers" || action.GetObject().(*api.ReplicationController).Spec.Replicas != int32(count) {
		t.Errorf("unexpected action %v, expected update-replicationController with replicas = %d", actions[1], count)
	}
}

@@ -261,7 +261,7 @@ func (c *ErrorJobs) Update(job *batch.Job) (*batch.Job, error) {
}

func (c *ErrorJobs) Get(name string) (*batch.Job, error) {
	zero := 0
	zero := int32(0)
	return &batch.Job{
		Spec: batch.JobSpec{
			Parallelism: &zero,

@@ -317,7 +317,7 @@ func TestJobScale(t *testing.T) {
	if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "jobs" || action.GetName() != name {
		t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name)
	}
	if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "jobs" || *action.GetObject().(*batch.Job).Spec.Parallelism != int(count) {
	if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "jobs" || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) {
		t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count)
	}
}

@@ -342,7 +342,7 @@ func TestJobScaleInvalid(t *testing.T) {
}

func TestJobScaleFailsPreconditions(t *testing.T) {
	ten := 10
	ten := int32(10)
	fake := testclient.NewSimpleFake(&batch.Job{
		Spec: batch.JobSpec{
			Parallelism: &ten,

@@ -364,7 +364,7 @@ func TestJobScaleFailsPreconditions(t *testing.T) {
}

func TestValidateJob(t *testing.T) {
	zero, ten, twenty := 0, 10, 20
	zero, ten, twenty := int32(0), int32(10), int32(20)
	tests := []struct {
		preconditions ScalePrecondition
		job           batch.Job

@@ -557,7 +557,7 @@ func TestDeploymentScale(t *testing.T) {
	if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
		t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name)
	}
	if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "deployments" || action.GetObject().(*extensions.Deployment).Spec.Replicas != int(count) {
	if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "deployments" || action.GetObject().(*extensions.Deployment).Spec.Replicas != int32(count) {
		t.Errorf("unexpected action %v, expected update-deployment with replicas = %d", actions[1], count)
	}
}

@@ -603,7 +603,7 @@ func TestDeploymentScaleFailsPreconditions(t *testing.T) {
}

func TestValidateDeployment(t *testing.T) {
	zero, ten, twenty := 0, 10, 20
	zero, ten, twenty := int32(0), int32(10), int32(20)
	tests := []struct {
		preconditions ScalePrecondition
		deployment    extensions.Deployment

@@ -136,7 +136,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) {
		}
		ports = append(ports, api.ServicePort{
			Name:     name,
			Port:     port,
			Port:     int32(port),
			Protocol: api.Protocol(params["protocol"]),
		})
	}

@@ -171,7 +171,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) {
		// should be the same as Port
		for i := range service.Spec.Ports {
			port := service.Spec.Ports[i].Port
			service.Spec.Ports[i].TargetPort = intstr.FromInt(port)
			service.Spec.Ports[i].TargetPort = intstr.FromInt(int(port))
		}
	}
	if params["create-external-load-balancer"] == "true" {

@@ -367,7 +367,7 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati
	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// set deployment's history and scale to 0
		// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
		d.Spec.RevisionHistoryLimit = util.IntPtr(0)
		d.Spec.RevisionHistoryLimit = util.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})

@@ -379,7 +379,7 @@ func TestReplicaSetStop(t *testing.T) {
func TestJobStop(t *testing.T) {
	name := "foo"
	ns := "default"
	zero := 0
	zero := int32(0)
	tests := []struct {
		Name string
		Objs []runtime.Object

@@ -44,13 +44,13 @@ func FromServices(services *api.ServiceList) []api.EnvVar {
		result = append(result, api.EnvVar{Name: name, Value: service.Spec.ClusterIP})
		// First port - give it the backwards-compatible name
		name = makeEnvVariableName(service.Name) + "_SERVICE_PORT"
		result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(service.Spec.Ports[0].Port)})
		result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(int(service.Spec.Ports[0].Port))})
		// All named ports (only the first may be unnamed, checked in validation)
		for i := range service.Spec.Ports {
			sp := &service.Spec.Ports[i]
			if sp.Name != "" {
				pn := name + "_" + makeEnvVariableName(sp.Name)
				result = append(result, api.EnvVar{Name: pn, Value: strconv.Itoa(sp.Port)})
				result = append(result, api.EnvVar{Name: pn, Value: strconv.Itoa(int(sp.Port))})
			}
		}
		// Docker-compatible vars.

@@ -96,7 +96,7 @@ func makeLinkVariables(service *api.Service) []api.EnvVar {
		},
		{
			Name:  portPrefix + "_PORT",
			Value: strconv.Itoa(sp.Port),
			Value: strconv.Itoa(int(sp.Port)),
		},
		{
			Name: portPrefix + "_ADDR",

@@ -1353,8 +1353,8 @@ func makePortMappings(container *api.Container) (ports []kubecontainer.PortMappi
	names := make(map[string]struct{})
	for _, p := range container.Ports {
		pm := kubecontainer.PortMapping{
			HostPort:      p.HostPort,
			ContainerPort: p.ContainerPort,
			HostPort:      int(p.HostPort),
			ContainerPort: int(p.ContainerPort),
			Protocol:      p.Protocol,
			HostIP:        p.HostIP,
		}

@@ -3506,7 +3506,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
		cid := cs.ID.String()
		status := &api.ContainerStatus{
			Name:         cs.Name,
			RestartCount: cs.RestartCount,
			RestartCount: int32(cs.RestartCount),
			Image:        cs.Image,
			ImageID:      cs.ImageID,
			ContainerID:  cid,

@@ -3516,7 +3516,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
			status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)}
		case kubecontainer.ContainerStateExited:
			status.State.Terminated = &api.ContainerStateTerminated{
				ExitCode:  cs.ExitCode,
				ExitCode:  int32(cs.ExitCode),
				Reason:    cs.Reason,
				Message:   cs.Message,
				StartedAt: unversioned.NewTime(cs.StartedAt),

@@ -78,7 +78,7 @@ func resolvePort(portReference intstr.IntOrString, container *api.Container) (in
	}
	for _, portSpec := range container.Ports {
		if portSpec.Name == portName {
			return portSpec.ContainerPort, nil
			return int(portSpec.ContainerPort), nil
		}
	}
	return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container)

@@ -43,7 +43,7 @@ func TestResolvePortString(t *testing.T) {
	name := "foo"
	container := &api.Container{
		Ports: []api.ContainerPort{
			{Name: name, ContainerPort: expected},
			{Name: name, ContainerPort: int32(expected)},
		},
	}
	port, err := resolvePort(intstr.FromString(name), container)

@@ -56,7 +56,7 @@ func TestResolvePortString(t *testing.T) {
}

func TestResolvePortStringUnknown(t *testing.T) {
	expected := 80
	expected := int32(80)
	name := "foo"
	container := &api.Container{
		Ports: []api.ContainerPort{

@@ -198,7 +198,7 @@ func extractPort(param intstr.IntOrString, container api.Container) (int, error)
func findPortByName(container api.Container, portName string) (int, error) {
	for _, port := range container.Ports {
		if port.Name == portName {
			return port.ContainerPort, nil
			return int(port.ContainerPort), nil
		}
	}
	return 0, fmt.Errorf("port %s not found", portName)

@@ -188,7 +188,7 @@ func (w *worker) doProbe() (keepGoing bool) {
			w.pod.Spec.RestartPolicy != api.RestartPolicyNever
	}

	if int(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
	if int32(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
		return true
	}

@@ -205,8 +205,8 @@ func (w *worker) doProbe() (keepGoing bool) {
		w.resultRun = 1
	}

	if (result == results.Failure && w.resultRun < w.spec.FailureThreshold) ||
		(result == results.Success && w.resultRun < w.spec.SuccessThreshold) {
	if (result == results.Failure && w.resultRun < int(w.spec.FailureThreshold)) ||
		(result == results.Success && w.resultRun < int(w.spec.SuccessThreshold)) {
		// Success or failure is below threshold - leave the probe state unchanged.
		return true
	}

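The first hunk narrows the elapsed run time to int32 so it can be compared against the API's InitialDelaySeconds field. The truncation is harmless here: an int32 count of seconds only overflows after roughly 68 years. A standalone sketch of the same comparison, with illustrative names:

package main

import (
	"fmt"
	"time"
)

func main() {
	started := time.Now().Add(-3 * time.Second)
	var initialDelaySeconds int32 = 10 // mirrors the API field's type

	// Truncate elapsed seconds to int32 to match the field being compared.
	if int32(time.Since(started).Seconds()) < initialDelaySeconds {
		fmt.Println("still inside the initial delay window")
	}
}
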
@@ -60,7 +60,7 @@ func NewHollowProxyOrDie(
) *HollowProxy {
	// Create and start Hollow Proxy
	config := options.NewProxyConfig()
	config.OOMScoreAdj = util.IntPtr(0)
	config.OOMScoreAdj = util.Int32Ptr(0)
	config.ResourceContainer = ""
	config.NodeRef = &api.ObjectReference{
		Kind: "Node",

@@ -158,12 +158,12 @@ func createPortAndServiceSpec(servicePort int, nodePort int, servicePortName str
	//Use the Cluster IP type for the service port if NodePort isn't provided.
	//Otherwise, we will be binding the master service to a NodePort.
	servicePorts := []api.ServicePort{{Protocol: api.ProtocolTCP,
		Port:       servicePort,
		Port:       int32(servicePort),
		Name:       servicePortName,
		TargetPort: intstr.FromInt(servicePort)}}
	serviceType := api.ServiceTypeClusterIP
	if nodePort > 0 {
		servicePorts[0].NodePort = nodePort
		servicePorts[0].NodePort = int32(nodePort)
		serviceType = api.ServiceTypeNodePort
	}
	if extraServicePorts != nil {

@@ -175,7 +175,7 @@ func createPortAndServiceSpec(servicePort int, nodePort int, servicePortName str
// createEndpointPortSpec creates an array of endpoint ports
func createEndpointPortSpec(endpointPort int, endpointPortName string, extraEndpointPorts []api.EndpointPort) []api.EndpointPort {
	endpointPorts := []api.EndpointPort{{Protocol: api.ProtocolTCP,
		Port: endpointPort,
		Port: int32(endpointPort),
		Name: endpointPortName,
	}}
	if extraEndpointPorts != nil {

@@ -285,8 +285,8 @@ func TestControllerServicePorts(t *testing.T) {

	controller := master.NewBootstrapController()

	assert.Equal(1000, controller.ExtraServicePorts[0].Port)
	assert.Equal(1010, controller.ExtraServicePorts[1].Port)
	assert.Equal(int32(1000), controller.ExtraServicePorts[0].Port)
	assert.Equal(int32(1010), controller.ExtraServicePorts[1].Port)
}

// TestGetNodeAddresses verifies that proper results are returned

@@ -94,14 +94,14 @@ func (g *MetricsGrabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error
		return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items)
	}
	kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port
	return g.grabFromKubeletInternal(nodeName, kubeletPort)
	return g.grabFromKubeletInternal(nodeName, int(kubeletPort))
}

func (g *MetricsGrabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (KubeletMetrics, error) {
	if kubeletPort <= 0 || kubeletPort > 65535 {
		return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering.", kubeletPort)
	}
	output, err := g.getMetricsFromNode(nodeName, kubeletPort)
	output, err := g.getMetricsFromNode(nodeName, int(kubeletPort))
	if err != nil {
		return KubeletMetrics{}, err
	}

@@ -173,7 +173,7 @@ func (g *MetricsGrabber) Grab(unknownMetrics sets.String) (MetricsCollection, er
	} else {
		for _, node := range nodes.Items {
			kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port
			metrics, err := g.grabFromKubeletInternal(node.Name, kubeletPort)
			metrics, err := g.grabFromKubeletInternal(node.Name, int(kubeletPort))
			if err != nil {
				errs = append(errs, err)
			}

@@ -330,7 +330,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
}

func (proxier *Proxier) sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool {
	if info.protocol != port.Protocol || info.port != port.Port || info.nodePort != port.NodePort {
	if info.protocol != port.Protocol || info.port != int(port.Port) || info.nodePort != int(port.NodePort) {
		return false
	}
	if !info.clusterIP.Equal(net.ParseIP(service.Spec.ClusterIP)) {

@@ -426,9 +426,9 @@ func (proxier *Proxier) OnServiceUpdate(allServices []api.Service) {
			glog.V(1).Infof("Adding new service %q at %s:%d/%s", serviceName, serviceIP, servicePort.Port, servicePort.Protocol)
			info = newServiceInfo(serviceName)
			info.clusterIP = serviceIP
			info.port = servicePort.Port
			info.port = int(servicePort.Port)
			info.protocol = servicePort.Protocol
			info.nodePort = servicePort.NodePort
			info.nodePort = int(servicePort.NodePort)
			info.externalIPs = service.Spec.ExternalIPs
			// Deep-copy in case the service instance changes
			info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)

@@ -483,7 +483,7 @@ func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
			port := &ss.Ports[i]
			for i := range ss.Addresses {
				addr := &ss.Addresses[i]
				portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, port.Port})
				portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
			}
		}
	}

@@ -419,11 +419,11 @@ func (proxier *Proxier) OnServiceUpdate(services []api.Service) {
				continue
			}
			info.portal.ip = serviceIP
			info.portal.port = servicePort.Port
			info.portal.port = int(servicePort.Port)
			info.externalIPs = service.Spec.ExternalIPs
			// Deep-copy in case the service instance changes
			info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
			info.nodePort = servicePort.NodePort
			info.nodePort = int(servicePort.NodePort)
			info.sessionAffinityType = service.Spec.SessionAffinity
			glog.V(4).Infof("info: %+v", info)

@@ -452,7 +452,7 @@ func (proxier *Proxier) OnServiceUpdate(services []api.Service) {
}

func sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool {
	if info.protocol != port.Protocol || info.portal.port != port.Port || info.nodePort != port.NodePort {
	if info.protocol != port.Protocol || info.portal.port != int(port.Port) || info.nodePort != int(port.NodePort) {
		return false
	}
	if !info.portal.ip.Equal(net.ParseIP(service.Spec.ClusterIP)) {

@@ -82,8 +82,8 @@ func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
	return fmt.Errorf("port %d still open", proxyPort)
}

var tcpServerPort int
var udpServerPort int
var tcpServerPort int32
var udpServerPort int32

func init() {
	// Don't handle panics

@@ -103,10 +103,11 @@ func init() {
	if err != nil {
		panic(fmt.Sprintf("failed to parse: %v", err))
	}
	tcpServerPort, err = strconv.Atoi(port)
	tcpServerPortValue, err := strconv.Atoi(port)
	if err != nil {
		panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
	}
	tcpServerPort = int32(tcpServerPortValue)

	// UDP setup.
	udp, err := newUDPEchoServer()

@@ -117,10 +118,11 @@ func init() {
	if err != nil {
		panic(fmt.Sprintf("failed to parse: %v", err))
	}
	udpServerPort, err = strconv.Atoi(port)
	udpServerPortValue, err := strconv.Atoi(port)
	if err != nil {
		panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
	}
	udpServerPort = int32(udpServerPortValue)
	go udp.Loop()
}

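strconv.Atoi always returns an int, which is why the init hunks above can no longer assign its result directly into the int32 package variables and instead parse into a temporary before converting. When the input is untrusted, checking the range before narrowing is the defensive variant; ParsePort below is a hypothetical helper, not part of this change:

package main

import (
	"fmt"
	"math"
	"strconv"
)

// ParsePort parses s and narrows to int32 only after confirming the value
// fits in the valid port range (hypothetical helper, not part of this change).
func ParsePort(s string) (int32, error) {
	v, err := strconv.Atoi(s) // Atoi always yields an int
	if err != nil {
		return 0, err
	}
	if v < 0 || v > math.MaxUint16 {
		return 0, fmt.Errorf("port %d out of range", v)
	}
	return int32(v), nil
}

func main() {
	p, err := ParsePort("8080")
	fmt.Println(p, err)
}
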
@@ -564,7 +566,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     svcInfo.proxyPort,
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	}})

@@ -616,7 +618,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     svcInfo.proxyPort,
			Port:     int32(svcInfo.proxyPort),
			Protocol: "UDP",
		}}},
	}})

@@ -752,7 +754,7 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Name:     "p",
				Port:     svcInfo.portal.port,
				Port:     int32(svcInfo.portal.port),
				Protocol: "TCP",
			}},
			ClusterIP: svcInfo.portal.ip.String(),

@@ -803,7 +805,7 @@ func TestProxyUpdatePortal(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     svcInfo.proxyPort,
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	}})

@@ -816,7 +818,7 @@ func TestProxyUpdatePortal(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     svcInfo.proxyPort,
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	}})

@@ -829,7 +831,7 @@ func TestProxyUpdatePortal(t *testing.T) {
		ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     svcInfo.proxyPort,
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	}})

@@ -244,7 +244,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
			port := &ss.Ports[i]
			for i := range ss.Addresses {
				addr := &ss.Addresses[i]
				portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, port.Port})
				portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
				// Ignore the protocol field - we'll get that from the Service objects.
			}
		}

@@ -281,7 +281,7 @@ func TestScaleUpdate(t *testing.T) {
	if err != nil {
		t.Fatalf("error setting new replication controller %v: %v", *validController, err)
	}
	replicas := 12
	replicas := int32(12)
	update := autoscaling.Scale{
		ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
		Spec: autoscaling.ScaleSpec{

@@ -105,7 +105,7 @@ func (rcStrategy) AllowUnconditionalUpdate() bool {
func ControllerToSelectableFields(controller *api.ReplicationController) fields.Set {
	objectMetaFieldsSet := generic.ObjectMetaFieldsSet(controller.ObjectMeta, true)
	controllerSpecificFieldsSet := fields.Set{
		"status.replicas": strconv.Itoa(controller.Status.Replicas),
		"status.replicas": strconv.Itoa(int(controller.Status.Replicas)),
	}
	return generic.MergeFieldsSets(objectMetaFieldsSet, controllerSpecificFieldsSet)
}

@@ -225,7 +225,7 @@ func TestScaleUpdate(t *testing.T) {
	if err := storage.Deployment.Storage.Create(ctx, key, &validDeployment, &deployment, 0); err != nil {
		t.Fatalf("error setting new deployment (key: %s) %v: %v", key, validDeployment, err)
	}
	replicas := 12
	replicas := int32(12)
	update := extensions.Scale{
		ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
		Spec: extensions.ScaleSpec{

@@ -54,7 +54,7 @@ var validPodTemplate = api.PodTemplate{
	},
}

var validReplicas = 8
var validReplicas = int32(8)

var validControllerSpec = api.ReplicationControllerSpec{
	Replicas: validReplicas,

@@ -108,7 +108,7 @@ func TestUpdate(t *testing.T) {
	if err := si.Create(ctx, key, &validController, nil, 0); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	replicas := 12
	replicas := int32(12)
	update := extensions.Scale{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"},
		Spec: extensions.ScaleSpec{

@@ -39,8 +39,8 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer)
}

func validNewJob() *batch.Job {
	completions := 1
	parallelism := 1
	completions := int32(1)
	parallelism := int32(1)
	return &batch.Job{
		ObjectMeta: api.ObjectMeta{
			Name: "foo",

@@ -97,7 +97,7 @@ func TestUpdate(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	test := registrytest.New(t, storage.Store)
	two := 2
	two := int32(2)
	test.TestUpdate(
		// valid
		validNewJob(),

@@ -158,7 +158,7 @@ func (jobStatusStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object
func JobToSelectableFields(job *batch.Job) fields.Set {
	objectMetaFieldsSet := generic.ObjectMetaFieldsSet(job.ObjectMeta, true)
	specificFieldsSet := fields.Set{
		"status.successful": strconv.Itoa(job.Status.Succeeded),
		"status.successful": strconv.Itoa(int(job.Status.Succeeded)),
	}
	return generic.MergeFieldsSets(objectMetaFieldsSet, specificFieldsSet)
}

@@ -80,7 +80,7 @@ func TestJobStrategy(t *testing.T) {
	if len(errs) != 0 {
		t.Errorf("Unexpected error validating %v", errs)
	}
	parallelism := 10
	parallelism := int32(10)
	updatedJob := &batch.Job{
		ObjectMeta: api.ObjectMeta{Name: "bar", ResourceVersion: "4"},
		Spec: batch.JobSpec{

@@ -173,8 +173,8 @@ func TestJobStatusStrategy(t *testing.T) {
			Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}},
		},
	}
	oldParallelism := 10
	newParallelism := 11
	oldParallelism := int32(10)
	newParallelism := int32(11)
	oldJob := &batch.Job{
		ObjectMeta: api.ObjectMeta{
			Name: "myjob",

@@ -123,7 +123,7 @@ func (r *REST) getKubeletPort(ctx api.Context, nodeName string) (int, error) {
	if !ok {
		return 0, fmt.Errorf("Unexpected object type: %#v", node)
	}
	return node.Status.DaemonEndpoints.KubeletEndpoint.Port, nil
	return int(node.Status.DaemonEndpoints.KubeletEndpoint.Port), nil
}

func (c *REST) GetConnectionInfo(ctx api.Context, nodeName string) (string, uint, http.RoundTripper, error) {

@@ -186,7 +186,7 @@ func ResourceLocation(getter ResourceGetter, connection client.ConnectionInfoGet
	if kubeletPort == 0 {
		kubeletPort = ports.KubeletPort
	}
	if portReq == "" || strconv.Itoa(kubeletPort) == portReq {
	if portReq == "" || strconv.Itoa(int(kubeletPort)) == portReq {
		scheme, port, kubeletTransport, err := connection.GetConnectionInfo(ctx, node.Name)
		if err != nil {
			return nil, nil, err

@@ -295,7 +295,7 @@ func TestScaleUpdate(t *testing.T) {
			Namespace: api.NamespaceDefault,
		},
		Spec: extensions.ScaleSpec{
			Replicas: replicas,
			Replicas: int32(replicas),
		},
	}

@@ -308,7 +308,7 @@ func TestScaleUpdate(t *testing.T) {
		t.Fatalf("error fetching scale for %s: %v", name, err)
	}
	scale := obj.(*extensions.Scale)
	if scale.Spec.Replicas != replicas {
	if scale.Spec.Replicas != int32(replicas) {
		t.Errorf("wrong replicas count expected: %d got: %d", replicas, scale.Spec.Replicas)
	}

@@ -106,7 +106,7 @@ func (rsStrategy) AllowUnconditionalUpdate() bool {
func ReplicaSetToSelectableFields(rs *extensions.ReplicaSet) fields.Set {
	objectMetaFieldsSet := generic.ObjectMetaFieldsSet(rs.ObjectMeta, true)
	rsSpecificFieldsSet := fields.Set{
		"status.replicas": strconv.Itoa(rs.Status.Replicas),
		"status.replicas": strconv.Itoa(int(rs.Status.Replicas)),
	}
	return generic.MergeFieldsSets(objectMetaFieldsSet, rsSpecificFieldsSet)
}

@ -115,7 +115,7 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err
|
|||
for i := range service.Spec.Ports {
|
||||
servicePort := &service.Spec.Ports[i]
|
||||
if servicePort.NodePort != 0 {
|
||||
err := nodePortOp.Allocate(servicePort.NodePort)
|
||||
err := nodePortOp.Allocate(int(servicePort.NodePort))
|
||||
if err != nil {
|
||||
// TODO: when validation becomes versioned, this gets more complicated.
|
||||
el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), servicePort.NodePort, err.Error())}
|
||||
|
@ -129,7 +129,7 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err
                // not really an internal error.
                return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
            }
            servicePort.NodePort = nodePort
            servicePort.NodePort = int32(nodePort)
        }
    }

@ -240,7 +240,7 @@ func (rs *REST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, boo
    if assignNodePorts {
        for i := range service.Spec.Ports {
            servicePort := &service.Spec.Ports[i]
            nodePort := servicePort.NodePort
            nodePort := int(servicePort.NodePort)
            if nodePort != 0 {
                if !contains(oldNodePorts, nodePort) {
                    err := nodePortOp.Allocate(nodePort)

@ -257,7 +257,7 @@ func (rs *REST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, boo
                // not really an internal error.
                return nil, false, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
            }
            servicePort.NodePort = nodePort
            servicePort.NodePort = int32(nodePort)
        }
        // Detect duplicate node ports; this should have been caught by validation, so we panic
        if contains(newNodePorts, nodePort) {

@ -316,7 +316,7 @@ func (rs *REST) ResourceLocation(ctx api.Context, id string) (*url.URL, http.Rou
    }
    found := false
    for _, svcPort := range svc.Spec.Ports {
        if svcPort.Port == int(portNum) {
        if int64(svcPort.Port) == portNum {
            // use the declared port's name
            portStr = svcPort.Name
            found = true

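The direction of the conversion in this hunk is deliberate: instead of narrowing the parsed port with `int(portNum)`, the new code widens the `int32` field to `int64` before comparing. Widening can never truncate, so an out-of-range request fails to match rather than aliasing a real port. A sketch of the difference (values chosen to show the wraparound):

    package main

    import "fmt"

    func main() {
        var svcPort int32 = 80
        var portNum int64 = 1<<32 + 80 // truncates to 80 when forced into 32 bits

        fmt.Println(svcPort == int32(portNum)) // true: narrowing creates a false match
        fmt.Println(int64(svcPort) == portNum) // false: widening preserves the value
    }
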
@ -347,7 +347,7 @@ func (rs *REST) ResourceLocation(ctx api.Context, id string) (*url.URL, http.Rou
        if ss.Ports[i].Name == portStr {
            // Pick a random address.
            ip := ss.Addresses[rand.Intn(len(ss.Addresses))].IP
            port := ss.Ports[i].Port
            port := int(ss.Ports[i].Port)
            return &url.URL{
                Scheme: svcScheme,
                Host: net.JoinHostPort(ip, strconv.Itoa(port)),

@ -374,7 +374,7 @@ func CollectServiceNodePorts(service *api.Service) []int {
    for i := range service.Spec.Ports {
        servicePort := &service.Spec.Ports[i]
        if servicePort.NodePort != 0 {
            servicePorts = append(servicePorts, servicePort.NodePort)
            servicePorts = append(servicePorts, int(servicePort.NodePort))
        }
    }
    return servicePorts

@ -270,7 +270,7 @@ func emptySubsets() []api.EndpointSubset {
func makeSubsets(ip string, port int) []api.EndpointSubset {
    return []api.EndpointSubset{{
        Addresses: []api.EndpointAddress{{IP: ip}},
        Ports: []api.EndpointPort{{Port: port}},
        Ports: []api.EndpointPort{{Port: int32(port)}},
    }}
}

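A pattern worth noticing in these test helpers: the signatures keep plain `int` so call sites stay terse, and the `int32` conversion happens exactly once, where the API struct is built. A sketch of that boundary style with a stand-in type (the helper name is hypothetical, not from the diff):

    package main

    import "fmt"

    // endpointPort stands in for the API type with an int32 Port field.
    type endpointPort struct {
        Name     string
        Port     int32
        Protocol string
    }

    // makePort keeps int in its signature and converts once at the boundary.
    func makePort(name string, port int) endpointPort {
        return endpointPort{Name: name, Port: int32(port), Protocol: "TCP"}
    }

    func main() {
        fmt.Printf("%+v\n", makePort("http", 8080))
    }
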
@ -237,8 +237,8 @@ func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.P
}

// Returns the sum of Replicas of the given replica sets.
func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int {
    totalReplicaCount := 0
func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
    totalReplicaCount := int32(0)
    for _, rs := range replicaSets {
        if rs != nil {
            totalReplicaCount += rs.Spec.Replicas

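The accumulator has to change type along with the field: `totalReplicaCount += rs.Spec.Replicas` compiles only when both sides are `int32`, because Go applies no implicit conversions even in compound assignment. A runnable sketch with a stand-in struct:

    package main

    import "fmt"

    type replicaSet struct{ Replicas int32 }

    func sumReplicas(sets []*replicaSet) int32 {
        total := int32(0) // `total := 0` would infer int and break the += below
        for _, rs := range sets {
            if rs != nil {
                total += rs.Replicas
            }
        }
        return total
    }

    func main() {
        fmt.Println(sumReplicas([]*replicaSet{{Replicas: 2}, nil, {Replicas: 3}})) // 5
    }
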
@ -248,8 +248,8 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int {
}

// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int {
    totalReplicaCount := 0
func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
    totalReplicaCount := int32(0)
    for _, rs := range replicaSets {
        if rs != nil {
            totalReplicaCount += rs.Status.Replicas

@ -259,7 +259,7 @@ func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) i
}

// Returns the number of available pods corresponding to the given replica sets.
func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int) (int, error) {
func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int32) (int32, error) {
    allPods, err := GetPodsForReplicaSets(c, rss)
    if err != nil {
        return 0, err

@ -267,8 +267,8 @@ func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.Rep
    return getReadyPodsCount(allPods, minReadySeconds), nil
}

func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int {
    readyPodCount := 0
func getReadyPodsCount(pods []api.Pod, minReadySeconds int32) int32 {
    readyPodCount := int32(0)
    for _, pod := range pods {
        if IsPodAvailable(&pod, minReadySeconds) {
            readyPodCount++

@ -277,7 +277,7 @@ func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int {
    return readyPodCount
}

func IsPodAvailable(pod *api.Pod, minReadySeconds int) bool {
func IsPodAvailable(pod *api.Pod, minReadySeconds int32) bool {
    if !controller.IsPodActive(*pod) {
        return false
    }

@ -340,17 +340,17 @@ func IsRollingUpdate(deployment *extensions.Deployment) bool {
// When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it.
// 1) The new RS is saturated: newRS's replicas == deployment's replicas
// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas
func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int, error) {
func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) {
    switch deployment.Spec.Strategy.Type {
    case extensions.RollingUpdateDeploymentStrategyType:
        // Check if we can scale up.
        maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Replicas, true)
        maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(deployment.Spec.Replicas), true)
        if err != nil {
            return 0, err
        }
        // Find the total number of pods
        currentPodCount := GetReplicaCountForReplicaSets(allRSs)
        maxTotalPods := deployment.Spec.Replicas + maxSurge
        maxTotalPods := deployment.Spec.Replicas + int32(maxSurge)
        if currentPodCount >= maxTotalPods {
            // Cannot scale up.
            return newRS.Spec.Replicas, nil

@ -358,7 +358,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re
        // Scale up.
        scaleUpCount := maxTotalPods - currentPodCount
        // Do not exceed the number of desired replicas.
        scaleUpCount = integer.IntMin(scaleUpCount, deployment.Spec.Replicas-newRS.Spec.Replicas)
        scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(deployment.Spec.Replicas-newRS.Spec.Replicas)))
        return newRS.Spec.Replicas + scaleUpCount, nil
    case extensions.RecreateDeploymentStrategyType:
        return deployment.Spec.Replicas, nil

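`integer.IntMin` is defined for `int` only, which forces the double conversion above. If the pattern recurs, a typed helper reads more cleanly; a hypothetical one (not part of this commit):

    package main

    import "fmt"

    // int32Min is a hypothetical mirror of integer.IntMin for int32.
    func int32Min(a, b int32) int32 {
        if b < a {
            return b
        }
        return a
    }

    func main() {
        fmt.Println(int32Min(3, 5)) // 3
    }

With it, the line would read `scaleUpCount = int32Min(scaleUpCount, deployment.Spec.Replicas-newRS.Spec.Replicas)`.
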
@ -389,12 +389,12 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment,
// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int) (int, int, error) {
    surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, desired, true)
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
    surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true)
    if err != nil {
        return 0, 0, err
    }
    unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, desired, false)
    unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false)
    if err != nil {
        return 0, 0, err
    }

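For orientation: the boolean passed to `GetValueFromIntOrPercent` selects the rounding direction, up for `maxSurge` and down for `maxUnavailable`, which is what makes the fencepost math work. A sketch of that rounding for percentage values, under my reading of the helper (nonnegative inputs assumed; the function name is illustrative):

    package main

    import "fmt"

    // fromPercent resolves a percentage against desired, rounding up or down.
    func fromPercent(percent, desired int, roundUp bool) int {
        if roundUp {
            return (desired*percent + 99) / 100 // ceiling for nonnegative values
        }
        return desired * percent / 100 // floor for nonnegative values
    }

    func main() {
        fmt.Println(fromPercent(25, 10, true))  // surge: 25% of 10 rounds up to 3
        fmt.Println(fromPercent(25, 10, false)) // unavailable: 25% of 10 rounds down to 2
    }
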
@ -407,5 +407,5 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired
        unavailable = 1
    }

    return surge, unavailable, nil
    return int32(surge), int32(unavailable), nil
}

@ -124,7 +124,7 @@ func TestGetReadyPodsCount(t *testing.T) {
    }

    for _, test := range tests {
        if count := getReadyPodsCount(test.pods, test.minReadySeconds); count != test.expected {
        if count := getReadyPodsCount(test.pods, int32(test.minReadySeconds)); int(count) != test.expected {
            t.Errorf("Pods = %#v, minReadySeconds = %d, expected %d, got %d", test.pods, test.minReadySeconds, test.expected, count)
        }
    }

@ -122,6 +122,12 @@ func IntPtr(i int) *int {
    return &o
}

// Int32Ptr returns a pointer to an int32
func Int32Ptr(i int32) *int32 {
    o := i
    return &o
}

// IntPtrDerefOr dereferences the int ptr and returns it if not nil,
// else returns def.
func IntPtrDerefOr(ptr *int, def int) int {

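The helper earns its keep because Go forbids taking the address of a conversion or constant inline; `&int32(10)` is a compile error. A short usage sketch built around the function added above:

    package main

    import "fmt"

    // Int32Ptr returns a pointer to an int32, as added in the hunk above.
    func Int32Ptr(i int32) *int32 {
        o := i
        return &o
    }

    func main() {
        p := Int32Ptr(10) // replaces: v := int32(10); p := &v
        fmt.Println(*p)
    }
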
@ -130,3 +136,12 @@ func IntPtrDerefOr(ptr *int, def int) int {
    }
    return def
}

// Int32PtrDerefOr dereferences the int32 ptr and returns it if not nil,
// else returns def.
func Int32PtrDerefOr(ptr *int32, def int32) int32 {
    if ptr != nil {
        return *ptr
    }
    return def
}

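And its counterpart for reading optional fields; a short usage sketch around the function added above:

    package main

    import "fmt"

    // Int32PtrDerefOr dereferences the pointer if non-nil, else returns def.
    func Int32PtrDerefOr(ptr *int32, def int32) int32 {
        if ptr != nil {
            return *ptr
        }
        return def
    }

    func main() {
        var unset *int32
        set := int32(5)
        fmt.Println(Int32PtrDerefOr(unset, 1)) // 1: falls back to the default
        fmt.Println(Int32PtrDerefOr(&set, 1))  // 5: dereferences the pointer
    }
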
@ -94,7 +94,7 @@ func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec,
    fsType := ebs.FSType
    partition := ""
    if ebs.Partition != 0 {
        partition = strconv.Itoa(ebs.Partition)
        partition = strconv.Itoa(int(ebs.Partition))
    }

    return &awsElasticBlockStoreMounter{

@ -92,7 +92,7 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
        return nil, fmt.Errorf("empty lun")
    }

    lun := strconv.Itoa(*fc.Lun)
    lun := strconv.Itoa(int(*fc.Lun))

    return &fcDiskMounter{
        fcDisk: &fcDisk{

@ -199,7 +199,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}

func TestPluginVolume(t *testing.T) {
    lun := 0
    lun := int32(0)
    vol := &api.Volume{
        Name: "vol1",
        VolumeSource: api.VolumeSource{

@ -214,7 +214,7 @@ func TestPluginVolume(t *testing.T) {
}

func TestPluginPersistentVolume(t *testing.T) {
    lun := 0
    lun := int32(0)
    vol := &api.PersistentVolume{
        ObjectMeta: api.ObjectMeta{
            Name: "vol1",

@ -239,7 +239,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    }
    defer os.RemoveAll(tmpDir)

    lun := 0
    lun := int32(0)
    pv := &api.PersistentVolume{
        ObjectMeta: api.ObjectMeta{
            Name: "pvA",

@ -94,7 +94,7 @@ func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, pod
    fsType := gce.FSType
    partition := ""
    if gce.Partition != 0 {
        partition = strconv.Itoa(gce.Partition)
        partition = strconv.Itoa(int(gce.Partition))
    }

    return &gcePersistentDiskMounter{

@ -89,7 +89,7 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI
        readOnly = spec.ReadOnly
    }

    lun := strconv.Itoa(iscsi.Lun)
    lun := strconv.Itoa(int(iscsi.Lun))
    portal := portalMounter(iscsi.TargetPortal)

    iface := iscsi.ISCSIInterface

@ -50,7 +50,7 @@ func NewSchedulerServer() *SchedulerServer {

// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&s.Port, "port", s.Port, "The port that the scheduler's http service runs on")
    fs.Int32Var(&s.Port, "port", s.Port, "The port that the scheduler's http service runs on")
    fs.StringVar(&s.Address, "address", s.Address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
    fs.StringVar(&s.AlgorithmProvider, "algorithm-provider", s.AlgorithmProvider, "The scheduling algorithm provider to use, one of: "+factory.ListAlgorithmProviders())
    fs.StringVar(&s.PolicyConfigFile, "policy-config-file", s.PolicyConfigFile, "File with scheduler policy configuration")

@ -60,12 +60,12 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
    var unusedBindPodsQPS float32
    fs.Float32Var(&unusedBindPodsQPS, "bind-pods-qps", 0, "unused, use --kube-api-qps")
    fs.MarkDeprecated("bind-pods-qps", "flag is unused and will be removed. Use kube-api-qps instead.")
    var unusedBindPodsBurst int
    fs.IntVar(&unusedBindPodsBurst, "bind-pods-burst", 0, "unused, use --kube-api-burst")
    var unusedBindPodsBurst int32
    fs.Int32Var(&unusedBindPodsBurst, "bind-pods-burst", 0, "unused, use --kube-api-burst")
    fs.MarkDeprecated("bind-pods-burst", "flag is unused and will be removed. Use kube-api-burst instead.")
    fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.")
    fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
    fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
    fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
    fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'")
    leaderelection.BindFlags(&s.LeaderElection, fs)
}

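pflag registers flags per concrete type, so once a field becomes `int32`, `IntVar` no longer accepts its address and `Int32Var` is required; there is no generic escape hatch. A minimal sketch (the flag name and default here are illustrative):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        var port int32
        fs := pflag.NewFlagSet("scheduler", pflag.ContinueOnError)
        fs.Int32Var(&port, "port", 10251, "The port that the scheduler's http service runs on")
        if err := fs.Parse([]string{"--port=8080"}); err != nil {
            panic(err)
        }
        fmt.Println(port) // 8080
    }
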
@ -82,7 +82,7 @@ func Run(s *options.SchedulerServer) error {
    kubeconfig.ContentType = s.ContentType
    // Override kubeconfig qps/burst settings from flags
    kubeconfig.QPS = s.KubeAPIQPS
    kubeconfig.Burst = s.KubeAPIBurst
    kubeconfig.Burst = int(s.KubeAPIBurst)

    kubeClient, err := client.New(kubeconfig)
    if err != nil {

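Assigning back into the client config needs nothing beyond the explicit conversion: Go's `int` is at least 32 bits, so `int(s.KubeAPIBurst)` is lossless on every platform; only the reverse direction, `int` to `int32`, can truncate. A sketch with a stand-in config struct:

    package main

    import "fmt"

    type restConfig struct {
        Burst int // the client-side field keeps plain int
    }

    func main() {
        var kubeAPIBurst int32 = 30
        cfg := restConfig{Burst: int(kubeAPIBurst)} // widening int32 -> int is always lossless
        fmt.Println(cfg.Burst)
    }
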
@ -101,7 +101,7 @@ func Run(s *options.SchedulerServer) error {
    mux.Handle("/metrics", prometheus.Handler())

    server := &http.Server{
        Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
        Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
        Handler: mux,
    }
    glog.Fatal(server.ListenAndServe())