mirror of https://github.com/THUDM/ChatGLM-6B
Add support for manual device_map; auto-config if None
parent c31d9c0984
commit c313af0639
utils.py
@@ -38,12 +38,14 @@ def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
 
 def load_model_on_gpus(checkpoint_path: Union[str, os.PathLike], num_gpus: int = 2,
                        multi_gpu_model_cache_dir: Union[str, os.PathLike] = "./temp_model_dir",
+                       device_map: Optional[Dict[str, int]] = None,
                        tokenizer: Optional[PreTrainedTokenizer] = None, **kwargs) -> Module:
     from accelerate import load_checkpoint_and_dispatch
 
     model = AutoModel.from_pretrained(checkpoint_path, trust_remote_code=True, **kwargs)
     model = model.eval()
 
-    device_map = auto_configure_device_map(num_gpus)
+    if device_map is None:
+        device_map = auto_configure_device_map(num_gpus)
     try:
         model = load_checkpoint_and_dispatch(
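With this change, callers can either let the helper build the placement automatically or pass their own device_map, a dict mapping submodule names to GPU indices. The sketch below is illustrative and not part of the commit; the checkpoint path, layer count, and module-name keys are assumptions and must match the actual loaded model.

from utils import load_model_on_gpus

# Auto-configured placement: device_map is None, so the helper falls back to
# auto_configure_device_map(num_gpus) internally.
model = load_model_on_gpus("THUDM/chatglm-6b", num_gpus=2)

# Manual placement: pass a device_map of submodule name -> GPU index.
# The keys and the 28-layer split below are assumed for illustration only.
manual_map = {
    "transformer.word_embeddings": 0,
    "transformer.final_layernorm": 1,
    "lm_head": 1,
}
manual_map.update({f"transformer.layers.{i}": 0 if i < 14 else 1 for i in range(28)})
model = load_model_on_gpus("THUDM/chatglm-6b", num_gpus=2, device_map=manual_map)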