mirror of https://github.com/InternLM/InternLM
fix(readme): fix model path in readme (#474)
parent b9c813a972
commit 42ad9cc786
````diff
@@ -108,7 +108,7 @@ To load the InternLM 7B chat model using Transformers
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_path = "/mnt/petrelfs/share_data/xingshuhao/internlm-chat-7b/"
+model_path = "internlm/internlm-chat-7b"
 model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
````
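The change is the same in all three READMEs (Japanese, Chinese, English): the hardcoded internal cluster path is replaced with the Hugging Face Hub model ID, so the snippet works outside the maintainers' environment. Below is a minimal sketch of the corrected loading flow; the `model.chat` helper and its `(tokenizer, query, history)` signature come from the repo's `trust_remote_code` modeling file and should be treated as an assumption here:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hub model ID introduced by this fix; weights are downloaded on first use
model_path = "internlm/internlm-chat-7b"

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
model = model.eval()

# `chat` is provided by InternLM's remote code (assumed signature)
response, history = model.chat(tokenizer, "Hello", history=[])
print(response)
```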
````diff
@@ -183,7 +183,7 @@ InternLM-7B includes a 7-billion-parameter base model and a chat model tailored for practical scenarios
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_path = "/mnt/petrelfs/share_data/xingshuhao/internlm-chat-7b/"
+model_path = "internlm/internlm-chat-7b"
 model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
````
````diff
@@ -180,7 +180,7 @@ The responses can be streamed using `stream_chat`:
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_path = "/mnt/petrelfs/share_data/xingshuhao/internlm-chat-7b/"
+model_path = "internlm/internlm-chat-7b"
 model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
````
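For the `stream_chat` usage referenced in the last hunk's context, the corrected path would be used along these lines. A sketch only: the generator yielding `(response, history)` pairs, where each `response` is the full text generated so far, is an assumption about InternLM's remote-code API:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "internlm/internlm-chat-7b"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).eval()

# stream_chat is assumed to yield progressively longer responses;
# print only the newly generated suffix on each iteration
printed = 0
for response, history in model.stream_chat(tokenizer, "Hello", history=[]):
    print(response[printed:], end="", flush=True)
    printed = len(response)
print()
```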