mirror of https://github.com/InternLM/InternLM
fix AutoModel
parent 3ab5c5294d
commit 59d7a1d58d
In the Chinese version of the conversion doc:

````diff
@@ -17,9 +17,8 @@ python tools/transformers/convert2hf.py --src_folder origin_ckpt/ --tgt_folder hf_ckpt/
 Then you can load it using the `from_pretrained` interface:
 
 ```python
-from modeling_internlm import InternLMForCausalLM
-
-model = InternForCausalLM.from_pretrained("hf_ckpt/")
+>>> from transformers import AutoTokenizer, AutoModel
+>>> model = AutoModel.from_pretrained("hf_ckpt/", trust_remote_code=True).cuda()
 ```
````
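The new snippet also fixes a latent bug: the old code imported `InternLMForCausalLM` but called `InternForCausalLM`, and it required `modeling_internlm.py` to be importable from the working directory. With `trust_remote_code=True`, `transformers` instead imports the class named in the checkpoint's `auto_map` directly from `hf_ckpt/`. A minimal sketch of the new flow, assuming the converted folder also contains the tokenizer files the script writes (the prompt is illustrative):

```python
from transformers import AutoTokenizer, AutoModel

# trust_remote_code=True lets transformers import modeling_internlm.py
# (and the tokenizer code) shipped alongside the weights in hf_ckpt/.
tokenizer = AutoTokenizer.from_pretrained("hf_ckpt/", trust_remote_code=True)
model = AutoModel.from_pretrained("hf_ckpt/", trust_remote_code=True).cuda().eval()

# Per the auto_map patch in this commit, AutoModel resolves to the base
# InternLMModel, so the forward pass returns hidden states, not logits.
inputs = tokenizer("Hello", return_tensors="pt").to("cuda")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size)
```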
And the same fix in the English doc:

````diff
@@ -16,9 +16,8 @@ python tools/transformers/convert2hf.py --src_folder origin_ckpt/ --tgt_folder hf_ckpt/
 Then, you can load it using the `from_pretrained` interface:
 
 ```python
-from modeling_internlm import InternLMForCausalLM
-
-model = InternForCausalLM.from_pretrained("hf_ckpt/")
+>>> from transformers import AutoTokenizer, AutoModel
+>>> model = AutoModel.from_pretrained("hf_ckpt/", trust_remote_code=True).cuda()
 ```
 
 `intern_moss_example.py` demonstrates an example of how to use LoRA for fine-tuning on the `fnlp/moss-moon-002-sft` dataset.
````
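`intern_moss_example.py` itself is not shown in this diff; as a rough sketch of the LoRA setup it alludes to, using the `peft` library (the `target_modules` names are assumptions about InternLM's attention projection layers, and the causal-LM `auto_map` entry is assumed to be present, neither is taken from the example file):

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("hf_ckpt/", trust_remote_code=True)
lora_cfg = LoraConfig(
    r=8,                     # rank of the low-rank update matrices
    lora_alpha=16,           # scaling factor applied to the update
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],  # assumed projection names
    task_type="CAUSAL_LM",
)
model = get_peft_model(base, lora_cfg)
model.print_trainable_parameters()  # only the adapter weights are trainable
```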
The conversion script picks up a `json` import:

````diff
@@ -1,5 +1,6 @@
 import argparse
 import math
+import json
 import os
 import re
 import tempfile
````
…and, after saving the model, patches the generated `config.json` so that `AutoModel` maps to `InternLMModel`:

````diff
@@ -163,6 +164,12 @@ if __name__ == "__main__":
 
     os.makedirs(target_folder, exist_ok=True)
     model.save_pretrained(target_folder, max_shard_size="20GB")
+    # TODO There should be a better way to add this.
+    with open(os.path.join(target_folder, "config.json")) as fp:
+        config_dict = json.load(fp)
+    config_dict["auto_map"]["AutoModel"] = "modeling_internlm.InternLMModel"
+    with open(os.path.join(target_folder, "config.json"), "w") as fp:
+        json.dump(config_dict, fp, indent=2)
 
     tokenizer = InternLMTokenizer(args.tokenizer)
     tokenizer.save_pretrained(target_folder)
````