Merge pull request #8704 from roberthbailey/dead-flags

Deprecate flags for nodecontroller
Saad Ali 2015-05-26 09:54:01 -07:00
commit c5525ecfdc
5 changed files with 29 additions and 76 deletions
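The pattern throughout this change: each dead flag stays registered so existing command lines still parse, but it is marked deprecated via pflag, which hides it from --help and prints a warning whenever it is set. A minimal standalone sketch of that mechanism (illustrative only, not code from this commit; current github.com/spf13/pflag API assumed):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("kube-controller-manager", pflag.ExitOnError)

	// Keep the flag registered so existing invocations still parse.
	machines := fs.String("machines", "", "List of machines to schedule onto, comma separated.")

	// MarkDeprecated hides the flag from --help and makes any use of it print
	// "Flag --machines has been deprecated, will be removed in a future version"
	// to stderr.
	fs.MarkDeprecated("machines", "will be removed in a future version")

	fs.Parse([]string{"--machines=host-a,host-b"})
	fmt.Println(*machines) // the parsed value is still available
}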

File 1 of 5

@@ -37,7 +37,6 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
apierrors "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
-"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
@@ -189,15 +188,9 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
// TODO: Write an integration test for the replication controllers watch.
go controllerManager.Run(3, util.NeverStop)
-nodeResources := &api.NodeResources{
-Capacity: api.ResourceList{
-api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
-api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
-}}
-nodeController := nodecontroller.NewNodeController(nil, "", nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(),
+nodeController := nodecontroller.NewNodeController(nil, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(),
40*time.Second, 60*time.Second, 5*time.Second, nil, false)
-nodeController.Run(5*time.Second, true)
+nodeController.Run(5 * time.Second)
cadvisorInterface := new(cadvisor.Fake)
// Kubelet (localhost)

File 2 of 5

@@ -26,7 +26,6 @@ import (
"strconv"
"time"
-"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
@@ -58,15 +57,11 @@ type CMServer struct {
CloudConfigFile string
ConcurrentEndpointSyncs int
ConcurrentRCSyncs int
-MinionRegexp string
NodeSyncPeriod time.Duration
ResourceQuotaSyncPeriod time.Duration
NamespaceSyncPeriod time.Duration
PVClaimBinderSyncPeriod time.Duration
RegisterRetryCount int
-MachineList util.StringList
-SyncNodeList bool
-SyncNodeStatus bool
NodeMonitorGracePeriod time.Duration
NodeStartupGracePeriod time.Duration
NodeMonitorPeriod time.Duration
@@ -76,10 +71,6 @@ type CMServer struct {
DeletingPodsBurst int
ServiceAccountKeyFile string
-// TODO: Discover these by pinging the host machines, and rip out these params.
-NodeMilliCPU int64
-NodeMemory resource.Quantity
ClusterName string
ClusterCIDR util.IPNet
AllocateNodeCIDRs bool
@@ -87,6 +78,14 @@ type CMServer struct {
Master string
Kubeconfig string
+// The following fields are deprecated and unused except in flag parsing.
+MinionRegexp string
+MachineList util.StringList
+SyncNodeList bool
+SyncNodeStatus bool
+NodeMilliCPU int64
+NodeMemory resource.Quantity
}
// NewCMServer creates a new CMServer with a default config.
@@ -102,8 +101,6 @@ func NewCMServer() *CMServer {
PVClaimBinderSyncPeriod: 10 * time.Second,
RegisterRetryCount: 10,
PodEvictionTimeout: 5 * time.Minute,
-NodeMilliCPU: 1000,
-NodeMemory: resource.MustParse("3Gi"),
SyncNodeList: true,
ClusterName: "kubernetes",
}
@@ -119,6 +116,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.IntVar(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
fs.IntVar(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
fs.StringVar(&s.MinionRegexp, "minion-regexp", s.MinionRegexp, "If non empty, and --cloud-provider is specified, a regular expression for matching minion VMs.")
fs.MarkDeprecated("minion-regexp", "will be removed in a future version")
fs.DurationVar(&s.NodeSyncPeriod, "node-sync-period", s.NodeSyncPeriod, ""+
"The period for syncing nodes from cloudprovider. Longer periods will result in "+
"fewer calls to cloud provider, but may delay addition of new nodes to cluster.")
@@ -131,7 +129,9 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.IntVar(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
"The number of retries for initial node registration. Retry interval equals node-sync-period.")
fs.Var(&s.MachineList, "machines", "List of machines to schedule onto, comma separated.")
fs.MarkDeprecated("machines", "will be removed in a future version")
fs.BoolVar(&s.SyncNodeList, "sync-nodes", s.SyncNodeList, "If true, and --cloud-provider is specified, sync nodes from the cloud provider. Default true.")
fs.MarkDeprecated("sync-nodes", "will be removed in a future version")
fs.BoolVar(&s.SyncNodeStatus, "sync-node-status", s.SyncNodeStatus,
"DEPRECATED. Does not have any effect now and it will be removed in a later release.")
fs.DurationVar(&s.NodeMonitorGracePeriod, "node-monitor-grace-period", 40*time.Second,
@@ -143,10 +143,10 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&s.NodeMonitorPeriod, "node-monitor-period", 5*time.Second,
"The period for syncing NodeStatus in NodeController.")
fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA key used to sign service account tokens.")
-// TODO: Discover these by pinging the host machines, and rip out these flags.
-// TODO: in the meantime, use resource.QuantityFlag() instead of these
fs.Int64Var(&s.NodeMilliCPU, "node-milli-cpu", s.NodeMilliCPU, "The amount of MilliCPU provisioned on each node")
+fs.MarkDeprecated("node-milli-cpu", "will be removed in a future version")
fs.Var(resource.NewQuantityFlagValue(&s.NodeMemory), "node-memory", "The amount of memory (in bytes) provisioned on each node")
+fs.MarkDeprecated("node-memory", "will be removed in a future version")
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
fs.Var(&s.ClusterCIDR, "cluster-cidr", "CIDR Range for Pods in cluster.")
@@ -155,25 +155,8 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
}
-func (s *CMServer) verifyMinionFlags() {
-if !s.SyncNodeList && s.MinionRegexp != "" {
-glog.Info("--minion-regexp is ignored by --sync-nodes=false")
-}
-if s.CloudProvider == "" || s.MinionRegexp == "" {
-if len(s.MachineList) == 0 {
-glog.Info("No machines specified!")
-}
-return
-}
-if len(s.MachineList) != 0 {
-glog.Info("--machines is overwritten by --minion-regexp")
-}
-}
// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
-s.verifyMinionFlags()
if s.Kubeconfig == "" && s.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
@@ -219,21 +202,15 @@ func (s *CMServer) Run(_ []string) error {
go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)
cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
-nodeResources := &api.NodeResources{
-Capacity: api.ResourceList{
-api.ResourceCPU: *resource.NewMilliQuantity(s.NodeMilliCPU, resource.DecimalSI),
-api.ResourceMemory: s.NodeMemory,
-},
-}
if s.SyncNodeStatus {
glog.Warning("DEPRECATION NOTICE: sync-node-status flag is being deprecated. It has no effect now and it will be removed in a future version.")
}
-nodeController := nodecontroller.NewNodeController(cloud, s.MinionRegexp, nodeResources,
-kubeClient, s.RegisterRetryCount, s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
+nodeController := nodecontroller.NewNodeController(cloud, kubeClient, s.RegisterRetryCount,
+s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
-nodeController.Run(s.NodeSyncPeriod, s.SyncNodeList)
+nodeController.Run(s.NodeSyncPeriod)
serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
if err := serviceController.Run(s.NodeSyncPeriod); err != nil {
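
One detail worth noting in the hunk above: the remaining sync-node-status warning keys off the parsed value, so an explicit --sync-node-status=false passes silently. A hypothetical alternative (not part of this commit) is pflag's Changed, which reports whether the flag was set at all:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("kube-controller-manager", pflag.ExitOnError)
	fs.Bool("sync-node-status", false,
		"DEPRECATED. Does not have any effect now and it will be removed in a later release.")

	fs.Parse([]string{"--sync-node-status=false"})

	// Changed is true whenever the flag appeared on the command line,
	// even with an explicit false, which a value check would miss.
	if fs.Changed("sync-node-status") {
		fmt.Println("WARNING: --sync-node-status has no effect and will be removed.")
	}
}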

File 3 of 5

@@ -30,7 +30,6 @@ import (
kubeletapp "github.com/GoogleCloudPlatform/kubernetes/cmd/kubelet/app"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
@@ -53,13 +52,10 @@ import (
)
var (
-addr = flag.String("addr", "127.0.0.1", "The address to use for the apiserver.")
-port = flag.Int("port", 8080, "The port for the apiserver to use.")
-dockerEndpoint = flag.String("docker-endpoint", "", "If non-empty, use this for the docker endpoint to communicate with")
-etcdServer = flag.String("etcd-server", "http://localhost:4001", "If non-empty, path to the set of etcd server to use")
-// TODO: Discover these by pinging the host machines, and rip out these flags.
-nodeMilliCPU = flag.Int64("node-milli-cpu", 1000, "The amount of MilliCPU provisioned on each node")
-nodeMemory = flag.Int64("node-memory", 3*1024*1024*1024, "The amount of memory (in bytes) provisioned on each node")
+addr = flag.String("addr", "127.0.0.1", "The address to use for the apiserver.")
+port = flag.Int("port", 8080, "The port for the apiserver to use.")
+dockerEndpoint = flag.String("docker-endpoint", "", "If non-empty, use this for the docker endpoint to communicate with")
+etcdServer = flag.String("etcd-server", "http://localhost:4001", "If non-empty, path to the set of etcd server to use")
masterServiceNamespace = flag.String("master-service-namespace", api.NamespaceDefault, "The namespace from which the kubernetes master services should be injected into pods")
enableProfiling = flag.Bool("profiling", false, "Enable profiling via web interface host:port/debug/pprof/")
deletingPodsQps = flag.Float32("deleting-pods-qps", 0.1, "")
@@ -123,19 +119,12 @@ func runScheduler(cl *client.Client) {
}
// RunControllerManager starts a controller
-func runControllerManager(cl *client.Client, nodeMilliCPU, nodeMemory int64) {
-nodeResources := &api.NodeResources{
-Capacity: api.ResourceList{
-api.ResourceCPU: *resource.NewMilliQuantity(nodeMilliCPU, resource.DecimalSI),
-api.ResourceMemory: *resource.NewQuantity(nodeMemory, resource.BinarySI),
-},
-}
+func runControllerManager(cl *client.Client) {
const nodeSyncPeriod = 10 * time.Second
nodeController := nodecontroller.NewNodeController(
-nil, "", nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst),
+nil, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst),
40*time.Second, 60*time.Second, 5*time.Second, nil, false)
-nodeController.Run(nodeSyncPeriod, true)
+nodeController.Run(nodeSyncPeriod)
serviceController := servicecontroller.New(nil, cl, "kubernetes")
if err := serviceController.Run(nodeSyncPeriod); err != nil {
@@ -152,7 +141,7 @@ func runControllerManager(cl *client.Client, nodeMilliCPU, nodeMemory int64) {
func startComponents(etcdClient tools.EtcdClient, cl *client.Client, addr net.IP, port int) {
runApiServer(etcdClient, addr, port, *masterServiceNamespace)
runScheduler(cl)
-runControllerManager(cl, *nodeMilliCPU, *nodeMemory)
+runControllerManager(cl)
dockerClient := dockertools.ConnectToDockerOrDie(*dockerEndpoint)
cadvisorInterface, err := cadvisor.New(0)

File 4 of 5

@@ -50,8 +50,6 @@ type nodeStatusData struct {
type NodeController struct {
cloud cloudprovider.Interface
-matchRE string
-staticResources *api.NodeResources
kubeClient client.Interface
recorder record.EventRecorder
registerRetryCount int
@@ -93,8 +91,6 @@ type NodeController struct {
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
cloud cloudprovider.Interface,
-matchRE string,
-staticResources *api.NodeResources,
kubeClient client.Interface,
registerRetryCount int,
podEvictionTimeout time.Duration,
@@ -117,8 +113,6 @@ func NewNodeController(
}
return &NodeController{
cloud: cloud,
-matchRE: matchRE,
-staticResources: staticResources,
kubeClient: kubeClient,
recorder: recorder,
registerRetryCount: registerRetryCount,
@@ -178,7 +172,7 @@ func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
}
// Run starts an asynchronous loop that monitors the status of cluster nodes.
-func (nc *NodeController) Run(period time.Duration, syncNodeList bool) {
+func (nc *NodeController) Run(period time.Duration) {
// Incorporate the results of node status pushed from kubelet to master.
go util.Forever(func() {
if err := nc.monitorNodeStatus(); err != nil {

File 5 of 5

@@ -324,7 +324,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
}
for _, item := range table {
-nodeController := NewNodeController(nil, "", nil, item.fakeNodeHandler, 10,
+nodeController := NewNodeController(nil, item.fakeNodeHandler, 10,
evictionTimeout, util.NewFakeRateLimiter(), testNodeMonitorGracePeriod,
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
nodeController.now = func() util.Time { return fakeNow }
@@ -527,7 +527,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
}
for _, item := range table {
-nodeController := NewNodeController(nil, "", nil, item.fakeNodeHandler, 10, 5*time.Minute, util.NewFakeRateLimiter(),
+nodeController := NewNodeController(nil, item.fakeNodeHandler, 10, 5*time.Minute, util.NewFakeRateLimiter(),
testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
nodeController.now = func() util.Time { return fakeNow }
if err := nodeController.monitorNodeStatus(); err != nil {