From 6d895f8b256ff8c63a0994ad03300f3779676a60 Mon Sep 17 00:00:00 2001
From: alexhegit <31022192+alexhegit@users.noreply.github.com>
Date: Fri, 18 Aug 2023 00:08:24 +0800
Subject: [PATCH] Build the device_map automatically for multi-GPUs

The accelerate library provides functions to build the device_map
automatically. This patch uses infer_auto_device_map() to replace the
hard-coded logic that splits the model by hand. It works well with my
two GPUs (6GB RTX 3060 + 12GB RTX 3060).

Signed-off-by: Alex He
---
 utils.py | 45 +++++++--------------------------------------
 1 file changed, 7 insertions(+), 38 deletions(-)

diff --git a/utils.py b/utils.py
index 78fa5e8..d78f706 100644
--- a/utils.py
+++ b/utils.py
@@ -5,43 +5,6 @@ from torch.nn import Module
 from transformers import AutoModel
 
 
-def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
-    # transformer.word_embeddings takes 1 layer
-    # transformer.final_layernorm and lm_head take 1 layer
-    # transformer.layers takes 28 layers
-    # distribute the 30 layers in total across num_gpus cards
-    num_trans_layers = 28
-    per_gpu_layers = 30 / num_gpus
-
-    # bugfix: on Linux, torch.embedding raises a RuntimeError when the weight and input passed to it are not on the same device
-    # on Windows, model.device is set to transformer.word_embeddings.device
-    # on Linux, model.device is set to lm_head.device
-    # when chat or stream_chat is called, input_ids is placed on model.device
-    # if transformer.word_embeddings.device and model.device differ, a RuntimeError is raised
-    # so transformer.word_embeddings, transformer.final_layernorm and lm_head are all placed on the first card
-    # this file comes from https://github.com/THUDM/ChatGLM-6B/blob/main/utils.py
-    # with only minor changes here to support ChatGLM2
-    device_map = {
-        'transformer.embedding.word_embeddings': 0,
-        'transformer.encoder.final_layernorm': 0,
-        'transformer.output_layer': 0,
-        'transformer.rotary_pos_emb': 0,
-        'lm_head': 0
-    }
-
-    used = 2
-    gpu_target = 0
-    for i in range(num_trans_layers):
-        if used >= per_gpu_layers:
-            gpu_target += 1
-            used = 0
-        assert gpu_target < num_gpus
-        device_map[f'transformer.encoder.layers.{i}'] = gpu_target
-        used += 1
-
-    return device_map
-
-
 def load_model_on_gpus(checkpoint_path: Union[str, os.PathLike], num_gpus: int = 2,
                        device_map: Optional[Dict[str, int]] = None, **kwargs) -> Module:
     if num_gpus < 2 and device_map is None:
@@ -52,7 +15,13 @@ def load_model_on_gpus(checkpoint_path: Union[str, os.PathLike], num_gpus: int =
         model = AutoModel.from_pretrained(checkpoint_path, trust_remote_code=True, **kwargs).half()
 
         if device_map is None:
-            device_map = auto_configure_device_map(num_gpus)
+            from accelerate import infer_auto_device_map
+
+            device_map = infer_auto_device_map(model, no_split_module_classes=["GLMBlock"])
+            # Optionally, pass max_memory to cap the memory used on each device, e.g.:
+            # Hugging Face recommends leaving some headroom on gpu0, which also holds activations and outputs.
+            # device_map = infer_auto_device_map(model, max_memory={0: "4GiB", 1: "10GiB", "cpu": "30GiB"}, no_split_module_classes=["GLMBlock"])
+            # print(device_map)
 
         model = dispatch_model(model, device_map=device_map)
 
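
A minimal usage sketch of the patched code path; the checkpoint name
"THUDM/chatglm2-6b" and num_gpus=2 are illustrative assumptions, not
part of the patch:

    from transformers import AutoTokenizer
    from utils import load_model_on_gpus

    # accelerate's infer_auto_device_map() now plans the placement, so no
    # hand-written layer-to-GPU mapping is needed.
    tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
    model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2).eval()

    response, history = model.chat(tokenizer, "Hello", history=[])
    print(response)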