diff --git a/README-ja-JP.md b/README-ja-JP.md
index 77736fc..ac1efba 100644
--- a/README-ja-JP.md
+++ b/README-ja-JP.md
@@ -108,7 +108,7 @@ Transformers を使用して InternLM 7B チャットモデルをロードする
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_path = "/mnt/petrelfs/share_data/xingshuhao/internlm-chat-7b/"
+model_path = "internlm/internlm-chat-7b"
 model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
diff --git a/README-zh-Hans.md b/README-zh-Hans.md
index edb64df..764eeca 100644
--- a/README-zh-Hans.md
+++ b/README-zh-Hans.md
@@ -183,7 +183,7 @@ InternLM-7B 包含了一个拥有70亿参数的基础模型和一个为实际场
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_path = "/mnt/petrelfs/share_data/xingshuhao/internlm-chat-7b/"
+model_path = "internlm/internlm-chat-7b"
 model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
diff --git a/README.md b/README.md
index 9983337..eb21166 100644
--- a/README.md
+++ b/README.md
@@ -180,7 +180,7 @@ The responses can be streamed using `stream_chat`:
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_path = "/mnt/petrelfs/share_data/xingshuhao/internlm-chat-7b/"
+model_path = "internlm/internlm-chat-7b"
 model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)