mirror of https://github.com/openspug/spug

commit 540e8b377d
parent 26c3879b5c

fix issues
@@ -59,7 +59,7 @@ def _ext1_deploy(req, helper, env):
     extend = req.deploy.extend_obj
     env.update(SPUG_DST_DIR=extend.dst_dir)
     threads, latest_exception = [], None
-    max_workers = min(10, os.cpu_count() * 4) if req.deploy.is_parallel else 1
+    max_workers = max(10, os.cpu_count() * 5) if req.deploy.is_parallel else 1
     with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
         for h_id in json.loads(req.host_ids):
             env = AttrDict(env.items())
@@ -124,7 +124,7 @@ def _ext2_deploy(req, helper, env):
             break
     if host_actions:
         threads, latest_exception = [], None
-        max_workers = min(10, os.cpu_count() * 4) if req.deploy.is_parallel else 1
+        max_workers = max(10, os.cpu_count() * 5) if req.deploy.is_parallel else 1
         with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
             for h_id in json.loads(req.host_ids):
                 env = AttrDict(env.items())
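Note on the two hunks above (the same swap appears again in batch_sync_host further down): the old expression min(10, os.cpu_count() * 4) used 10 as a ceiling on the deploy thread pool, while the new max(10, os.cpu_count() * 5) uses 10 as a floor and lets the pool grow with the CPU count; the `... if req.deploy.is_parallel else 1` tail still forces a serial deploy when parallel mode is off. A minimal standalone sketch of how the two expressions scale (not part of the commit):

import os

def old_pool_size(cpus):
    return min(10, cpus * 4)   # 10 acted as an upper bound

def new_pool_size(cpus):
    return max(10, cpus * 5)   # 10 now acts as a lower bound

for cpus in (1, 2, 4, 8, os.cpu_count() or 1):
    print(f'{cpus} CPUs -> old: {old_pool_size(cpus)}, new: {new_pool_size(cpus)}')

On a 1-CPU box the pool grows from 4 to 10 workers; on an 8-CPU box it grows from 10 to 40.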
@@ -9,6 +9,7 @@ from apps.schedule.executors import schedule_worker_handler
 from apps.monitor.executors import monitor_worker_handler
 from apps.exec.executors import exec_worker_handler
 import logging
+import os

 EXEC_WORKER_KEY = settings.EXEC_WORKER_KEY
 MONITOR_WORKER_KEY = settings.MONITOR_WORKER_KEY
@@ -20,7 +21,7 @@ logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(message)s')
 class Worker:
     def __init__(self):
         self.rds = get_redis_connection()
-        self._executor = ThreadPoolExecutor(max_workers=1)
+        self._executor = ThreadPoolExecutor(max_workers=max(50, os.cpu_count() * 20))

     def run(self):
         logging.warning('Running worker')
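The Worker change above is why `import os` was added: with max_workers=1 the handlers dispatched by this process (exec/monitor/schedule, per the imports in the previous hunk) could only run one at a time, while max(50, os.cpu_count() * 20) lets them overlap. A hypothetical timing sketch (not spug code) showing the difference with a blocking stand-in handler:

import os
import time
from concurrent.futures import ThreadPoolExecutor

def handler(i):
    time.sleep(0.2)   # stand-in for a blocking handler call
    return i

for workers in (1, max(50, (os.cpu_count() or 1) * 20)):
    start = time.time()
    with ThreadPoolExecutor(max_workers=workers) as pool:
        list(pool.map(handler, range(20)))
    print(f'max_workers={workers}: {time.time() - start:.2f}s')

With one worker the 20 tasks take roughly 4 seconds; with the enlarged pool they finish in about the time of a single task.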
@@ -214,7 +214,7 @@ def fetch_host_extend(ssh):
 def batch_sync_host(token, hosts, password, ):
     private_key, public_key = AppSetting.get_ssh_key()
     threads, latest_exception, rds = [], None, get_redis_connection()
-    max_workers = min(10, os.cpu_count() * 4)
+    max_workers = max(10, os.cpu_count() * 5)
     with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
         for host in hosts:
             t = executor.submit(_sync_host_extend, host, private_key, public_key, password)
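batch_sync_host submits one _sync_host_extend call per host into the bounded pool (the futures are presumably collected in `threads`; the rest of the function is outside this hunk). A generic submit-and-collect sketch of that pattern, using a placeholder sync_one instead of spug's real helper:

import os
from concurrent import futures

def sync_one(host):
    return f'synced {host}'   # placeholder for _sync_host_extend(host, ...)

hosts = ['web-1', 'web-2', 'db-1']
latest_exception = None
max_workers = max(10, (os.cpu_count() or 1) * 5)
with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
    tasks = [executor.submit(sync_one, h) for h in hosts]
    for task in futures.as_completed(tasks):
        try:
            print(task.result())
        except Exception as e:      # record the failure but keep draining results
            latest_exception = e
if latest_exception:
    raise latest_exception

The `latest_exception` handling mirrors the variable initialized in the hunk above; how spug actually consumes the futures is not shown in this diff.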