diff --git a/cluster/juju/layers/kubernetes-master/lib/charms/kubernetes/flagmanager.py b/cluster/juju/layers/kubernetes-master/lib/charms/kubernetes/flagmanager.py
deleted file mode 100644
index 7fe5737a6e..0000000000
--- a/cluster/juju/layers/kubernetes-master/lib/charms/kubernetes/flagmanager.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2015 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from charmhelpers.core import unitdata
-
-
-class FlagManager:
-    '''
-    FlagManager - A Python class for managing the flags to pass to an
-    application without remembering what's been set previously.
-
-    This is a blind class assuming the operator knows what they are doing.
-    Each instance of this class should be initialized with the intended
-    application to manage flags. Flags are then appended to a data-structure
-    and cached in unitdata for later recall.
-
-    THe underlying data-provider is backed by a SQLITE database on each unit,
-    tracking the dictionary, provided from the 'charmhelpers' python package.
-    Summary:
-    opts = FlagManager('docker')
-    opts.add('bip', '192.168.22.2')
-    opts.to_s()
-    '''
-
-    def __init__(self, daemon, opts_path=None):
-        self.db = unitdata.kv()
-        self.daemon = daemon
-        if not self.db.get(daemon):
-            self.data = {}
-        else:
-            self.data = self.db.get(daemon)
-
-    def __save(self):
-        self.db.set(self.daemon, self.data)
-
-    def add(self, key, value, strict=False):
-        '''
-        Adds data to the map of values for the DockerOpts file.
-        Supports single values, or "multiopt variables". If you
-        have a flag only option, like --tlsverify, set the value
-        to None. To preserve the exact value, pass strict
-        eg:
-        opts.add('label', 'foo')
-        opts.add('label', 'foo, bar, baz')
-        opts.add('flagonly', None)
-        opts.add('cluster-store', 'consul://a:4001,b:4001,c:4001/swarm',
-                 strict=True)
-        '''
-        if strict:
-            self.data['{}-strict'.format(key)] = value
-            self.__save()
-            return
-
-        if value:
-            values = [x.strip() for x in value.split(',')]
-            # handle updates
-            if key in self.data and self.data[key] is not None:
-                item_data = self.data[key]
-                for c in values:
-                    c = c.strip()
-                    if c not in item_data:
-                        item_data.append(c)
-                self.data[key] = item_data
-            else:
-                # handle new
-                self.data[key] = values
-        else:
-            # handle flagonly
-            self.data[key] = None
-        self.__save()
-
-    def remove(self, key, value):
-        '''
-        Remove a flag value from the DockerOpts manager
-        Assuming the data is currently {'foo': ['bar', 'baz']}
-        d.remove('foo', 'bar')
-        > {'foo': ['baz']}
-        :params key:
-        :params value:
-        '''
-        self.data[key].remove(value)
-        self.__save()
-
-    def destroy(self, key, strict=False):
-        '''
-        Destructively remove all values and key from the FlagManager
-        Assuming the data is currently {'foo': ['bar', 'baz']}
-        d.wipe('foo')
-        >{}
-        :params key:
-        :params strict:
-        '''
-        try:
-            if strict:
-                self.data.pop('{}-strict'.format(key))
-            else:
-                self.data.pop(key)
-            self.__save()
-        except KeyError:
-            pass
-
-    def get(self, key, default=None):
-        """Return the value for ``key``, or the default if ``key`` doesn't exist.
-
-        """
-        return self.data.get(key, default)
-
-    def destroy_all(self):
-        '''
-        Destructively removes all data from the FlagManager.
-        '''
-        self.data.clear()
-        self.__save()
-
-    def to_s(self):
-        '''
-        Render the flags to a single string, prepared for the Docker
-        Defaults file. Typically in /etc/default/docker
-        d.to_s()
-        > "--foo=bar --foo=baz"
-        '''
-        flags = []
-        for key in self.data:
-            if self.data[key] is None:
-                # handle flagonly
-                flags.append("{}".format(key))
-            elif '-strict' in key:
-                # handle strict values, and do it in 2 steps.
-                # If we rstrip -strict it strips a tailing s
-                proper_key = key.rstrip('strict').rstrip('-')
-                flags.append("{}={}".format(proper_key, self.data[key]))
-            else:
-                # handle multiopt and typical flags
-                for item in self.data[key]:
-                    flags.append("{}={}".format(key, item))
-        return ' '.join(flags)
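For context on the class removed above: it accumulates a flag dictionary in unitdata across hook invocations and renders it with to_s() (which, despite the docstring's "--foo=bar" example, does not actually prepend "--"). A simplified, self-contained sketch of that rendering, with made-up values:

# Illustration only, not part of the tree: data as FlagManager would have
# persisted it in unitdata after a few add() calls.
data = {'insecure-port': ['8080'],                       # add('insecure-port', '8080')
        'profiling': None,                               # flag-only option
        'etcd-servers-strict': 'http://127.0.0.1:4001'}  # add(..., strict=True)

flags = []
for key, value in data.items():
    if value is None:
        flags.append(key)
    elif key.endswith('-strict'):
        flags.append('%s=%s' % (key[:-len('-strict')], value))
    else:
        flags.extend('%s=%s' % (key, item) for item in value)

print(' '.join(flags))
# insecure-port=8080 profiling etcd-servers=http://127.0.0.1:4001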
diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py
index 4d2e7bd112..e7b3b55065 100644
--- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py
+++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py
@@ -43,7 +43,6 @@ from charms.reactive import when, when_any, when_not, when_all
 from charms.reactive.helpers import data_changed, any_file_changed
 from charms.kubernetes.common import get_version
 from charms.kubernetes.common import retry
-from charms.kubernetes.flagmanager import FlagManager
 from charms.layer import tls_client
 
 
@@ -172,11 +171,6 @@ def migrate_from_pre_snaps():
             hookenv.log("Removing file: " + file)
             os.remove(file)
 
-    # clear the flag managers
-    FlagManager('kube-apiserver').destroy_all()
-    FlagManager('kube-controller-manager').destroy_all()
-    FlagManager('kube-scheduler').destroy_all()
-
 
 def install_snaps():
     channel = hookenv.config('channel')
@@ -228,15 +222,10 @@ def configure_cni(cni):
 
 @when_not('authentication.setup')
 def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.'''
-    api_opts = FlagManager('kube-apiserver')
-    controller_opts = FlagManager('kube-controller-manager')
-
     service_key = '/root/cdk/serviceaccount.key'
     basic_auth = '/root/cdk/basic_auth.csv'
     known_tokens = '/root/cdk/known_tokens.csv'
-    api_opts.add('basic-auth-file', basic_auth)
-    api_opts.add('token-auth-file', known_tokens)
 
     hookenv.status_set('maintenance', 'Rendering authentication templates.')
     keys = [service_key, basic_auth, known_tokens]
@@ -257,9 +246,6 @@
         check_call(cmd)
     remove_state('reconfigure.authentication.setup')
 
-    api_opts.add('service-account-key-file', service_key)
-    controller_opts.add('service-account-private-key-file', service_key)
-
     # read service account key for syndication
     leader_data = {}
     for f in [known_tokens, basic_auth, service_key]:
@@ -294,13 +280,6 @@ def setup_non_leader_authentication():
         return
 
     hookenv.status_set('maintenance', 'Rendering authentication templates.')
-    api_opts = FlagManager('kube-apiserver')
-    api_opts.add('basic-auth-file', basic_auth)
-    api_opts.add('token-auth-file', known_tokens)
-    api_opts.add('service-account-key-file', service_key)
-
-    controller_opts = FlagManager('kube-controller-manager')
-    controller_opts.add('service-account-private-key-file', service_key)
 
     remove_state('kubernetes-master.components.started')
     set_state('authentication.setup')
@@ -400,7 +379,7 @@ def start_master(etcd):
     handle_etcd_relation(etcd)
 
     # Add CLI options to all components
-    configure_apiserver()
+    configure_apiserver(etcd)
     configure_controller_manager()
     configure_scheduler()
@@ -768,8 +747,9 @@ def on_config_allow_privileged_change():
 
 @when('config.changed.api-extra-args')
 @when('kubernetes-master.components.started')
-def on_config_api_extra_args_change():
-    configure_apiserver()
+@when('etcd.available')
+def on_config_api_extra_args_change(etcd):
+    configure_apiserver(etcd)
 
 
 @when('config.changed.controller-manager-extra-args')
@@ -957,9 +937,9 @@ def get_kubernetes_service_ip():
 def handle_etcd_relation(reldata):
     ''' Save the client credentials and set appropriate daemon flags when
     etcd declares itself as available'''
-    connection_string = reldata.get_connection_string()
     # Define where the etcd tls files will be kept.
     etcd_dir = '/root/cdk/etcd'
+
     # Create paths to the etcd client ca, key, and cert file locations.
     ca = os.path.join(etcd_dir, 'client-ca.pem')
     key = os.path.join(etcd_dir, 'client-key.pem')
@@ -968,85 +948,45 @@ def handle_etcd_relation(reldata):
     # Save the client credentials (in relation data) to the paths provided.
     reldata.save_client_credentials(key, cert, ca)
 
-    api_opts = FlagManager('kube-apiserver')
-    # Never use stale data, always prefer whats coming in during context
-    # building. if its stale, its because whats in unitdata is stale
-    data = api_opts.data
-    if data.get('etcd-servers-strict') or data.get('etcd-servers'):
-        api_opts.destroy('etcd-cafile')
-        api_opts.destroy('etcd-keyfile')
-        api_opts.destroy('etcd-certfile')
-        api_opts.destroy('etcd-servers', strict=True)
-        api_opts.destroy('etcd-servers')
 
+def parse_extra_args(config_key):
+    elements = hookenv.config().get(config_key, '').split()
+    args = {}
 
-    # Set the apiserver flags in the options manager
-    api_opts.add('etcd-cafile', ca)
-    api_opts.add('etcd-keyfile', key)
-    api_opts.add('etcd-certfile', cert)
-    api_opts.add('etcd-servers', connection_string, strict=True)
-
-
-def get_config_args(key):
-    db = unitdata.kv()
-    old_config_args = db.get(key, [])
-    # We have to convert them to tuples becuase we use sets
-    old_config_args = [tuple(i) for i in old_config_args]
-    new_config_args = []
-    new_config_arg_names = []
-    for arg in hookenv.config().get(key, '').split():
-        new_config_arg_names.append(arg.split('=', 1)[0])
-        if len(arg.split('=', 1)) == 1:  # handle flags ie. --profiling
-            new_config_args.append(tuple([arg, 'true']))
+    for element in elements:
+        if '=' in element:
+            key, _, value = element.partition('=')
+            args[key] = value
         else:
-            new_config_args.append(tuple(arg.split('=', 1)))
+            args[element] = 'true'
 
-    hookenv.log('Handling "%s" option.' % key)
-    hookenv.log('Old arguments: {}'.format(old_config_args))
-    hookenv.log('New arguments: {}'.format(new_config_args))
-    if set(new_config_args) == set(old_config_args):
-        return (new_config_args, [])
-    # Store new args
-    db.set(key, new_config_args)
-    to_add = set(new_config_args)
-    to_remove = set(old_config_args) - set(new_config_args)
-    # Extract option names only
-    to_remove = [i[0] for i in to_remove if i[0] not in new_config_arg_names]
-    return (to_add, to_remove)
+    return args
 
 
 def configure_kubernetes_service(service, base_args, extra_args_key):
-    # Handle api-extra-args config option
-    to_add, to_remove = get_config_args(extra_args_key)
+    db = unitdata.kv()
 
-    flag_manager = FlagManager(service)
+    prev_args_key = 'kubernetes-master.prev_args.' + service
+    prev_args = db.get(prev_args_key) or {}
 
-    # Remove arguments that are no longer provided as config option
-    # this allows them to be reverted to charm defaults
-    for arg in to_remove:
-        hookenv.log('Removing option: {}'.format(arg))
-        flag_manager.destroy(arg)
-        # We need to "unset" options by setting their value to "null" string
-        cmd = ['snap', 'set', service, '{}=null'.format(arg)]
-        check_call(cmd)
+    extra_args = parse_extra_args(extra_args_key)
 
-    # Add base arguments
+    args = {}
+    for arg in prev_args:
+        # remove previous args by setting to null
+        args[arg] = 'null'
     for k, v in base_args.items():
-        flag_manager.add(k, v, strict=True)
+        args[k] = v
+    for k, v in extra_args.items():
+        args[k] = v
 
-    # Add operator-provided arguments, this allows operators
-    # to override defaults
-    for arg in to_add:
-        hookenv.log('Adding option: {} {}'.format(arg[0], arg[1]))
-        # Make sure old value is gone
-        flag_manager.destroy(arg[0])
-        flag_manager.add(arg[0], arg[1], strict=True)
-
-    cmd = ['snap', 'set', service] + flag_manager.to_s().split(' ')
+    cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
     check_call(cmd)
 
+    db.set(prev_args_key, args)
 
-def configure_apiserver():
+
+def configure_apiserver(etcd):
     api_opts = {}
 
     # Get the tls paths from the layer data.
@@ -1078,6 +1018,20 @@ def configure_apiserver():
     api_opts['insecure-port'] = '8080'
     api_opts['storage-backend'] = 'etcd2'  # FIXME: add etcd3 support
 
+    api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
+    api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
+    api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
+
+    etcd_dir = '/root/cdk/etcd'
+    etcd_ca = os.path.join(etcd_dir, 'client-ca.pem')
+    etcd_key = os.path.join(etcd_dir, 'client-key.pem')
+    etcd_cert = os.path.join(etcd_dir, 'client-cert.pem')
+
+    api_opts['etcd-cafile'] = etcd_ca
+    api_opts['etcd-keyfile'] = etcd_key
+    api_opts['etcd-certfile'] = etcd_cert
+    api_opts['etcd-servers'] = etcd.get_connection_string()
+
     admission_control = [
         'Initializers',
         'NamespaceLifecycle',
@@ -1120,6 +1074,9 @@ def configure_controller_manager():
     controller_opts['logtostderr'] = 'true'
     controller_opts['master'] = 'http://127.0.0.1:8080'
 
+    controller_opts['service-account-private-key-file'] = \
+        '/root/cdk/serviceaccount.key'
+
     configure_kubernetes_service('kube-controller-manager', controller_opts,
                                  'controller-manager-extra-args')
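To make the replacement concrete: with the helpers above, the charm no longer keeps a hidden flag store; it recomputes the full option set on every run and hands it to snap set. A standalone sketch (illustrative values, not charm code) of what configure_kubernetes_service() composes for kube-apiserver on a first run with api-extra-args set to 'runtime-config=batch/v2alpha1=true profiling':

# What parse_extra_args() returns for that config string: a bare flag becomes
# 'true', key=value pairs are split on the first '='.
extra_args = {'runtime-config': 'batch/v2alpha1=true', 'profiling': 'true'}
base_args = {'insecure-port': '8080', 'storage-backend': 'etcd2'}

args = {}                  # no previously stored args on a first run
args.update(base_args)     # charm defaults first
args.update(extra_args)    # operator-supplied extras override them

cmd = ['snap', 'set', 'kube-apiserver'] + ['%s=%s' % kv for kv in args.items()]
# ['snap', 'set', 'kube-apiserver', 'insecure-port=8080',
#  'storage-backend=etcd2', 'runtime-config=batch/v2alpha1=true',
#  'profiling=true']   (ordering may vary)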
diff --git a/cluster/juju/layers/kubernetes-worker/config.yaml b/cluster/juju/layers/kubernetes-worker/config.yaml
index 9c91f7fc82..3fddf002f7 100644
--- a/cluster/juju/layers/kubernetes-worker/config.yaml
+++ b/cluster/juju/layers/kubernetes-worker/config.yaml
@@ -31,3 +31,21 @@ options:
     description: |
       When true, worker services will not be upgraded until the user triggers
      it manually by running the upgrade action.
+  kubelet-extra-args:
+    type: string
+    default: ""
+    description: |
+      Space separated list of flags and key=value pairs that will be passed as arguments to
+      kubelet. For example a value like this:
+        runtime-config=batch/v2alpha1=true profiling=true
+      will result in kubelet being run with the following options:
+        --runtime-config=batch/v2alpha1=true --profiling=true
+  proxy-extra-args:
+    type: string
+    default: ""
+    description: |
+      Space separated list of flags and key=value pairs that will be passed as arguments to
+      kube-proxy. For example a value like this:
+        runtime-config=batch/v2alpha1=true profiling=true
+      will result in kube-proxy being run with the following options:
+        --runtime-config=batch/v2alpha1=true --profiling=true
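As an illustration of how these two options behave (assumed values; the default flag names come from configure_kubelet() further down), the parsed extra args are merged after the charm's own options, so an operator can override a charm default as well as add new flags. Per the descriptions above, each resulting snap option ends up as a '--' flag on the daemon command line:

# kubelet-extra-args: "v=4 profiling=true"
charm_defaults = {'v': '0', 'network-plugin': 'cni'}   # subset of configure_kubelet()
extra = {'v': '4', 'profiling': 'true'}                # parsed operator value

merged = dict(charm_defaults)
merged.update(extra)
# {'v': '4', 'network-plugin': 'cni', 'profiling': 'true'}
# -> snap set kubelet v=4 network-plugin=cni profiling=true
# -> kubelet runs with --v=4 --network-plugin=cni --profiling=true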
diff --git a/cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/flagmanager.py b/cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/flagmanager.py
deleted file mode 100644
index 7fe5737a6e..0000000000
--- a/cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/flagmanager.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2015 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from charmhelpers.core import unitdata
-
-
-class FlagManager:
-    '''
-    FlagManager - A Python class for managing the flags to pass to an
-    application without remembering what's been set previously.
-
-    This is a blind class assuming the operator knows what they are doing.
-    Each instance of this class should be initialized with the intended
-    application to manage flags. Flags are then appended to a data-structure
-    and cached in unitdata for later recall.
-
-    THe underlying data-provider is backed by a SQLITE database on each unit,
-    tracking the dictionary, provided from the 'charmhelpers' python package.
-    Summary:
-    opts = FlagManager('docker')
-    opts.add('bip', '192.168.22.2')
-    opts.to_s()
-    '''
-
-    def __init__(self, daemon, opts_path=None):
-        self.db = unitdata.kv()
-        self.daemon = daemon
-        if not self.db.get(daemon):
-            self.data = {}
-        else:
-            self.data = self.db.get(daemon)
-
-    def __save(self):
-        self.db.set(self.daemon, self.data)
-
-    def add(self, key, value, strict=False):
-        '''
-        Adds data to the map of values for the DockerOpts file.
-        Supports single values, or "multiopt variables". If you
-        have a flag only option, like --tlsverify, set the value
-        to None. To preserve the exact value, pass strict
-        eg:
-        opts.add('label', 'foo')
-        opts.add('label', 'foo, bar, baz')
-        opts.add('flagonly', None)
-        opts.add('cluster-store', 'consul://a:4001,b:4001,c:4001/swarm',
-                 strict=True)
-        '''
-        if strict:
-            self.data['{}-strict'.format(key)] = value
-            self.__save()
-            return
-
-        if value:
-            values = [x.strip() for x in value.split(',')]
-            # handle updates
-            if key in self.data and self.data[key] is not None:
-                item_data = self.data[key]
-                for c in values:
-                    c = c.strip()
-                    if c not in item_data:
-                        item_data.append(c)
-                self.data[key] = item_data
-            else:
-                # handle new
-                self.data[key] = values
-        else:
-            # handle flagonly
-            self.data[key] = None
-        self.__save()
-
-    def remove(self, key, value):
-        '''
-        Remove a flag value from the DockerOpts manager
-        Assuming the data is currently {'foo': ['bar', 'baz']}
-        d.remove('foo', 'bar')
-        > {'foo': ['baz']}
-        :params key:
-        :params value:
-        '''
-        self.data[key].remove(value)
-        self.__save()
-
-    def destroy(self, key, strict=False):
-        '''
-        Destructively remove all values and key from the FlagManager
-        Assuming the data is currently {'foo': ['bar', 'baz']}
-        d.wipe('foo')
-        >{}
-        :params key:
-        :params strict:
-        '''
-        try:
-            if strict:
-                self.data.pop('{}-strict'.format(key))
-            else:
-                self.data.pop(key)
-            self.__save()
-        except KeyError:
-            pass
-
-    def get(self, key, default=None):
-        """Return the value for ``key``, or the default if ``key`` doesn't exist.
-
-        """
-        return self.data.get(key, default)
-
-    def destroy_all(self):
-        '''
-        Destructively removes all data from the FlagManager.
-        '''
-        self.data.clear()
-        self.__save()
-
-    def to_s(self):
-        '''
-        Render the flags to a single string, prepared for the Docker
-        Defaults file. Typically in /etc/default/docker
-        d.to_s()
-        > "--foo=bar --foo=baz"
-        '''
-        flags = []
-        for key in self.data:
-            if self.data[key] is None:
-                # handle flagonly
-                flags.append("{}".format(key))
-            elif '-strict' in key:
-                # handle strict values, and do it in 2 steps.
-                # If we rstrip -strict it strips a tailing s
-                proper_key = key.rstrip('strict').rstrip('-')
-                flags.append("{}={}".format(proper_key, self.data[key]))
-            else:
-                # handle multiopt and typical flags
-                for item in self.data[key]:
-                    flags.append("{}={}".format(key, item))
-        return ' '.join(flags)
diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
index 52c84c0d81..d43f06768a 100644
--- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
+++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
@@ -32,7 +32,6 @@ from charms.reactive import set_state, remove_state, is_state
 from charms.reactive import when, when_any, when_not
 
 from charms.kubernetes.common import get_version
-from charms.kubernetes.flagmanager import FlagManager
 from charms.reactive.helpers import data_changed, any_file_changed
 from charms.templating.jinja2 import render
@@ -66,9 +65,6 @@ def upgrade_charm():
     # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
     # since they can differ between k8s versions
     remove_state('kubernetes-worker.gpu.enabled')
-    kubelet_opts = FlagManager('kubelet')
-    kubelet_opts.destroy('feature-gates')
-    kubelet_opts.destroy('experimental-nvidia-gpus')
 
     remove_state('kubernetes-worker.cni-plugins.installed')
     remove_state('kubernetes-worker.config.created')
@@ -124,10 +120,6 @@ def cleanup_pre_snap_services():
             hookenv.log("Removing file: " + file)
             os.remove(file)
 
-    # cleanup old flagmanagers
-    FlagManager('kubelet').destroy_all()
-    FlagManager('kube-proxy').destroy_all()
-
 
 @when('config.changed.channel')
 def channel_changed():
@@ -344,7 +336,8 @@ def start_worker(kube_api, kube_control, auth_control, cni):
     set_privileged()
 
     create_config(random.choice(servers), creds)
-    configure_worker_services(servers, dns, cluster_cidr)
+    configure_kubelet(dns)
+    configure_kube_proxy(servers, cluster_cidr)
     set_state('kubernetes-worker.config.created')
     restart_unit_services()
     update_kubelet_status()
@@ -436,6 +429,12 @@ def apply_node_labels():
         _apply_node_label(label, overwrite=True)
 
 
+@when_any('config.changed.kubelet-extra-args',
+          'config.changed.proxy-extra-args')
+def extra_args_changed():
+    set_state('kubernetes-worker.restart-needed')
+
+
 def arch():
     '''Return the package architecture as a string. Raise an exception if the
     architecture is not supported by kubernetes.'''
@@ -469,44 +468,92 @@ def create_config(server, creds):
                       token=creds['proxy_token'], user='kube-proxy')
 
 
-def configure_worker_services(api_servers, dns, cluster_cidr):
-    ''' Add remaining flags for the worker services and configure snaps to use
-    them '''
+def parse_extra_args(config_key):
+    elements = hookenv.config().get(config_key, '').split()
+    args = {}
+
+    for element in elements:
+        if '=' in element:
+            key, _, value = element.partition('=')
+            args[key] = value
+        else:
+            args[element] = 'true'
+
+    return args
+
+
+def configure_kubernetes_service(service, base_args, extra_args_key):
+    db = unitdata.kv()
+
+    prev_args_key = 'kubernetes-worker.prev_args.' + service
+    prev_args = db.get(prev_args_key) or {}
+
+    extra_args = parse_extra_args(extra_args_key)
+
+    args = {}
+    for arg in prev_args:
+        # remove previous args by setting to null
+        args[arg] = 'null'
+    for k, v in base_args.items():
+        args[k] = v
+    for k, v in extra_args.items():
+        args[k] = v
+
+    cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
+    check_call(cmd)
+
+    db.set(prev_args_key, args)
+
+
+def configure_kubelet(dns):
     layer_options = layer.options('tls-client')
     ca_cert_path = layer_options.get('ca_certificate_path')
     server_cert_path = layer_options.get('server_certificate_path')
     server_key_path = layer_options.get('server_key_path')
 
-    kubelet_opts = FlagManager('kubelet')
-    kubelet_opts.add('require-kubeconfig', 'true')
-    kubelet_opts.add('kubeconfig', kubeconfig_path)
-    kubelet_opts.add('network-plugin', 'cni')
-    kubelet_opts.add('v', '0')
-    kubelet_opts.add('address', '0.0.0.0')
-    kubelet_opts.add('port', '10250')
-    kubelet_opts.add('cluster-dns', dns['sdn-ip'])
-    kubelet_opts.add('cluster-domain', dns['domain'])
-    kubelet_opts.add('anonymous-auth', 'false')
-    kubelet_opts.add('client-ca-file', ca_cert_path)
-    kubelet_opts.add('tls-cert-file', server_cert_path)
-    kubelet_opts.add('tls-private-key-file', server_key_path)
-    kubelet_opts.add('logtostderr', 'true')
-    kubelet_opts.add('fail-swap-on', 'false')
+    kubelet_opts = {}
+    kubelet_opts['require-kubeconfig'] = 'true'
+    kubelet_opts['kubeconfig'] = kubeconfig_path
+    kubelet_opts['network-plugin'] = 'cni'
+    kubelet_opts['v'] = '0'
+    kubelet_opts['address'] = '0.0.0.0'
+    kubelet_opts['port'] = '10250'
+    kubelet_opts['cluster-dns'] = dns['sdn-ip']
+    kubelet_opts['cluster-domain'] = dns['domain']
+    kubelet_opts['anonymous-auth'] = 'false'
+    kubelet_opts['client-ca-file'] = ca_cert_path
+    kubelet_opts['tls-cert-file'] = server_cert_path
+    kubelet_opts['tls-private-key-file'] = server_key_path
+    kubelet_opts['logtostderr'] = 'true'
+    kubelet_opts['fail-swap-on'] = 'false'
 
-    kube_proxy_opts = FlagManager('kube-proxy')
-    kube_proxy_opts.add('cluster-cidr', cluster_cidr)
-    kube_proxy_opts.add('kubeconfig', kubeproxyconfig_path)
-    kube_proxy_opts.add('logtostderr', 'true')
-    kube_proxy_opts.add('v', '0')
-    kube_proxy_opts.add('master', random.choice(api_servers), strict=True)
+    privileged = is_state('kubernetes-worker.privileged')
+    kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
+
+    if is_state('kubernetes-worker.gpu.enabled'):
+        if get_version('kubelet') < (1, 6):
+            hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
+            kubelet_opts['experimental-nvidia-gpus'] = '1'
+        else:
+            hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
+            kubelet_opts['feature-gates'] = 'Accelerators=true'
+
+    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
+
+
+def configure_kube_proxy(api_servers, cluster_cidr):
+    kube_proxy_opts = {}
+    kube_proxy_opts['cluster-cidr'] = cluster_cidr
+    kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
+    kube_proxy_opts['logtostderr'] = 'true'
+    kube_proxy_opts['v'] = '0'
+    kube_proxy_opts['master'] = random.choice(api_servers)
 
     if b'lxc' in check_output('virt-what', shell=True):
-        kube_proxy_opts.add('conntrack-max-per-core', '0')
+        kube_proxy_opts['conntrack-max-per-core'] = '0'
 
-    cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
-    check_call(cmd)
-    cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
-    check_call(cmd)
+    configure_kubernetes_service('kube-proxy', kube_proxy_opts,
+                                 'proxy-extra-args')
 
 
 def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
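With the functions above, the GPU and privilege flags are derived from charm states each time configure_kubelet() runs, rather than being pushed into a persistent FlagManager by set_privileged()/enable_gpu() (see the hunks that follow, which now only toggle states and request a restart). A small self-contained sketch of that version-gated selection, with illustrative version tuples:

def gpu_kubelet_flags(kubelet_version, gpu_enabled):
    # mirrors the conditional in configure_kubelet() above
    opts = {}
    if gpu_enabled:
        if kubelet_version < (1, 6):
            opts['experimental-nvidia-gpus'] = '1'
        else:
            opts['feature-gates'] = 'Accelerators=true'
    return opts

gpu_kubelet_flags((1, 5, 2), True)   # {'experimental-nvidia-gpus': '1'}
gpu_kubelet_flags((1, 7, 4), True)   # {'feature-gates': 'Accelerators=true'}
gpu_kubelet_flags((1, 7, 4), False)  # {}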
@@ -700,12 +747,6 @@ def set_privileged():
     gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
     privileged = 'true' if gpu_enabled else 'false'
 
-    flag = 'allow-privileged'
-    hookenv.log('Setting {}={}'.format(flag, privileged))
-
-    kubelet_opts = FlagManager('kubelet')
-    kubelet_opts.add(flag, privileged)
-
     if privileged == 'true':
         set_state('kubernetes-worker.privileged')
     else:
@@ -748,14 +789,6 @@ def enable_gpu():
         hookenv.log(cpe)
         return
 
-    kubelet_opts = FlagManager('kubelet')
-    if get_version('kubelet') < (1, 6):
-        hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
-        kubelet_opts.add('experimental-nvidia-gpus', '1')
-    else:
-        hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
-        kubelet_opts.add('feature-gates', 'Accelerators=true')
-
     # Apply node labels
     _apply_node_label('gpu=true', overwrite=True)
     _apply_node_label('cuda=true', overwrite=True)
@@ -777,12 +810,6 @@ def disable_gpu():
     """
     hookenv.log('Disabling gpu mode')
 
-    kubelet_opts = FlagManager('kubelet')
-    if get_version('kubelet') < (1, 6):
-        kubelet_opts.destroy('experimental-nvidia-gpus')
-    else:
-        kubelet_opts.remove('feature-gates', 'Accelerators=true')
-
     # Remove node labels
     _apply_node_label('gpu', delete=True)
     _apply_node_label('cuda', delete=True)
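Finally, a self-contained sketch (plain dicts, illustrative values) of why configure_kubernetes_service() stores the applied args in unitdata: on the next run, anything the operator removed from an extra-args option is passed to snap set as '<key>=null', so the service falls back to its default:

def compose(prev_args, base_args, extra_args):
    # mirrors the merge order in configure_kubernetes_service()
    args = {arg: 'null' for arg in prev_args}   # reset everything set last time
    args.update(base_args)                      # then re-apply charm defaults
    args.update(extra_args)                     # then operator extras
    return args

# run 1: operator sets proxy-extra-args="v=4"
run1 = compose({}, {'cluster-cidr': '10.1.0.0/16'}, {'v': '4'})
# {'cluster-cidr': '10.1.0.0/16', 'v': '4'}

# run 2: operator clears proxy-extra-args; 'v' is reset via null
run2 = compose(run1, {'cluster-cidr': '10.1.0.0/16'}, {})
# {'cluster-cidr': '10.1.0.0/16', 'v': 'null'}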