avoid contention in model loading

pull/1034/head
lambda 2023-05-16 07:56:11 +00:00
parent 2c25e52421
commit d68ae88488
2 changed files with 5 additions and 2 deletions

ptuning/README.md

@@ -8,7 +8,7 @@
 ## Software Dependencies
 Running the fine-tuning code requires `transformers` version 4.27.1. In addition to the dependencies of ChatGLM-6B, the following dependencies also need to be installed:
 ```
-pip install rouge_chinese nltk jieba datasets
+pip install rouge_chinese nltk jieba datasets filelock
 ```
 ## Usage
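
For background: `filelock` is a small PyPI package that provides inter-process advisory locks backed by a lock file on disk, which is why it is added as a dependency here. Below is a minimal sketch of its basic API, assuming the standard `filelock` package; the lock path is illustrative:

```
from filelock import FileLock, Timeout

lock = FileLock("/tmp/demo.lock")  # illustrative path; any path shared by all processes works
try:
    # Block for up to 5 seconds if another process already holds the lock.
    with lock.acquire(timeout=5):
        print("critical section: only one process at a time runs this")
except Timeout:
    print("another process held the lock for more than 5 seconds")
```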

ptuning/main.py

@@ -45,6 +45,8 @@ from trainer_seq2seq import Seq2SeqTrainer
 from arguments import ModelArguments, DataTrainingArguments
+from filelock import FileLock
+
 logger = logging.getLogger(__name__)
 
 def main():
@@ -122,7 +124,8 @@ def main():
                 new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
         model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
     else:
-        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
+        with FileLock("model.lock"):
+            model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
 
     if model_args.quantization_bit is not None:
         print(f"Quantized to {model_args.quantization_bit} bit")