fix issues

pull/410/head
vapao 2021-09-18 18:50:03 +08:00
parent 63d85b91ed
commit 105abd757f
1 changed file with 60 additions and 34 deletions

@@ -75,22 +75,35 @@ def _ext1_deploy(req, helper, env):
         req.repository = rep
     extend = req.deploy.extend_obj
     env.update(SPUG_DST_DIR=extend.dst_dir)
-    threads, latest_exception = [], None
-    max_workers = max(10, os.cpu_count() * 5) if req.deploy.is_parallel else 1
-    with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
-        for h_id in json.loads(req.host_ids):
-            env = AttrDict(env.items())
-            t = executor.submit(_deploy_ext1_host, req, helper, h_id, env)
-            t.h_id = h_id
-            threads.append(t)
-        for t in futures.as_completed(threads):
-            exception = t.exception()
-            if exception:
-                latest_exception = exception
-                if not isinstance(exception, SpugError):
-                    helper.send_error(t.h_id, f'Exception: {exception}', False)
-    if latest_exception:
-        raise latest_exception
+    if req.deploy.is_parallel:
+        threads, latest_exception = [], None
+        max_workers = max(10, os.cpu_count() * 5)
+        with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+            for h_id in json.loads(req.host_ids):
+                new_env = AttrDict(env.items())
+                t = executor.submit(_deploy_ext1_host, req, helper, h_id, new_env)
+                t.h_id = h_id
+                threads.append(t)
+            for t in futures.as_completed(threads):
+                exception = t.exception()
+                if exception:
+                    latest_exception = exception
+                    if not isinstance(exception, SpugError):
+                        helper.send_error(t.h_id, f'Exception: {exception}', False)
+        if latest_exception:
+            raise latest_exception
+    else:
+        host_ids = sorted(json.loads(req.host_ids), reverse=True)
+        while host_ids:
+            h_id = host_ids.pop()
+            new_env = AttrDict(env.items())
+            try:
+                _deploy_ext1_host(req, helper, h_id, new_env)
+            except Exception as e:
+                helper.send_error(h_id, f'Exception: {e}', False)
+                for h_id in host_ids:
+                    helper.send_error(h_id, '终止发布', False)
+                raise e
 
 
 def _ext2_deploy(req, helper, env):
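
The serial branch added above walks hosts one at a time and, on the first failure, reports that host, marks every remaining host as '终止发布' ("deployment aborted"), and re-raises. Below is a minimal runnable sketch of that pattern; ConsoleHelper, deploy_one and serial_deploy are hypothetical stand-ins for Spug's helper and _deploy_ext1_host, not its real API.

import json


class ConsoleHelper:
    """Stand-in for Spug's helper; prints instead of streaming to the web console."""

    def send_error(self, h_id, message, stop=True):
        print(f'host {h_id}: {message}')


def deploy_one(h_id):
    # Hypothetical stand-in for _deploy_ext1_host; host 3 fails on purpose.
    if h_id == 3:
        raise RuntimeError('disk full')
    print(f'host {h_id}: deployed')


def serial_deploy(host_ids_json, helper):
    # sorted(..., reverse=True) + pop() walks hosts in ascending id order while
    # keeping the not-yet-deployed ids in the list, so the except block can
    # mark every remaining host as aborted before re-raising.
    host_ids = sorted(json.loads(host_ids_json), reverse=True)
    while host_ids:
        h_id = host_ids.pop()
        try:
            deploy_one(h_id)
        except Exception as e:
            helper.send_error(h_id, f'Exception: {e}', False)
            for h_id in host_ids:
                helper.send_error(h_id, '终止发布', False)  # "deployment aborted"
            raise


if __name__ == '__main__':
    try:
        serial_deploy('[2, 4, 1, 3]', ConsoleHelper())
    except RuntimeError:
        pass  # host 4 never ran; it was reported as aborted above
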
@@ -140,24 +153,37 @@ def _ext2_deploy(req, helper, env):
             tmp_transfer_file = os.path.join(sp_dir, tar_gz_file)
             break
     if host_actions:
-        threads, latest_exception = [], None
-        max_workers = max(10, os.cpu_count() * 5) if req.deploy.is_parallel else 1
-        with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
-            for h_id in json.loads(req.host_ids):
-                env = AttrDict(env.items())
-                t = executor.submit(_deploy_ext2_host, helper, h_id, host_actions, env, req.spug_version)
-                t.h_id = h_id
-                threads.append(t)
-            for t in futures.as_completed(threads):
-                exception = t.exception()
-                if exception:
-                    latest_exception = exception
-                    if not isinstance(exception, SpugError):
-                        helper.send_error(t.h_id, f'Exception: {exception}', False)
-        if tmp_transfer_file:
-            os.remove(tmp_transfer_file)
-        if latest_exception:
-            raise latest_exception
+        if req.deploy.is_parallel:
+            threads, latest_exception = [], None
+            max_workers = max(10, os.cpu_count() * 5)
+            with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+                for h_id in json.loads(req.host_ids):
+                    new_env = AttrDict(env.items())
+                    t = executor.submit(_deploy_ext2_host, helper, h_id, host_actions, new_env, req.spug_version)
+                    t.h_id = h_id
+                    threads.append(t)
+                for t in futures.as_completed(threads):
+                    exception = t.exception()
+                    if exception:
+                        latest_exception = exception
+                        if not isinstance(exception, SpugError):
+                            helper.send_error(t.h_id, f'Exception: {exception}', False)
+            if tmp_transfer_file:
+                os.remove(tmp_transfer_file)
+            if latest_exception:
+                raise latest_exception
+        else:
+            host_ids = sorted(json.loads(req.host_ids), reverse=True)
+            while host_ids:
+                h_id = host_ids.pop()
+                new_env = AttrDict(env.items())
+                try:
+                    _deploy_ext2_host(helper, h_id, host_actions, new_env, req.spug_version)
+                except Exception as e:
+                    helper.send_error(h_id, f'Exception: {e}', False)
+                    for h_id in host_ids:
+                        helper.send_error(h_id, '终止发布', False)
+                    raise e
     else:
         helper.send_step('local', 100, f'\r\n{human_time()} ** 发布成功 **')
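
The parallel branch keeps the existing thread-pool pattern in both hunks: every host is submitted to the executor, each future is tagged with its host id, and the last collected exception is re-raised once all hosts finish; the diff also switches the per-host copy from rebinding env to a fresh new_env so later hosts start from the original environment. A minimal sketch of that pattern follows; ConsoleHelper, deploy_one and parallel_deploy are illustrative stand-ins for Spug's helper and _deploy_ext2_host, not its real API.

import json
import os
from concurrent import futures


class ConsoleHelper:
    """Stand-in for Spug's helper; prints instead of streaming to the web console."""

    def send_error(self, h_id, message, stop=True):
        print(f'host {h_id}: {message}')


def deploy_one(h_id):
    # Hypothetical stand-in for _deploy_ext2_host; host 2 fails on purpose.
    if h_id == 2:
        raise RuntimeError('connection refused')
    return f'host {h_id}: deployed'


def parallel_deploy(host_ids_json, helper):
    threads, latest_exception = [], None
    # Same sizing rule as the diff: at least 10 workers, scaled by CPU count.
    max_workers = max(10, os.cpu_count() * 5)
    with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        for h_id in json.loads(host_ids_json):
            t = executor.submit(deploy_one, h_id)
            # Tag each future with its host id so a failure can be attributed
            # to the right host later, mirroring t.h_id in the diff.
            t.h_id = h_id
            threads.append(t)
        for t in futures.as_completed(threads):
            exception = t.exception()
            if exception:
                latest_exception = exception
                helper.send_error(t.h_id, f'Exception: {exception}', False)
            else:
                print(t.result())
    # Every host ran to completion or failure; re-raise one failure so the
    # caller still marks the deployment as failed.
    if latest_exception:
        raise latest_exception


if __name__ == '__main__':
    try:
        parallel_deploy('[1, 2, 3]', ConsoleHelper())
    except RuntimeError:
        pass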