diff --git a/README-ja-JP.md b/README-ja-JP.md
index 5e5b7db..71e1d4d 100644
--- a/README-ja-JP.md
+++ b/README-ja-JP.md
@@ -86,8 +86,8 @@ Transformers を使用して InternLM 7B チャットモデルをロードする

 ```python
 >>> from transformers import AutoTokenizer, AutoModelForCausalLM
->>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True)
->>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True).cuda()
+>>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
+>>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).cuda()
 >>> model = model.eval()
 >>> response, history = model.chat(tokenizer, "こんにちは", history=[])
 >>> print(response)
diff --git a/README-zh-Hans.md b/README-zh-Hans.md
index 62d58d0..7ac8e75 100644
--- a/README-zh-Hans.md
+++ b/README-zh-Hans.md
@@ -182,7 +182,7 @@ InternLM-7B 包含了一个拥有70亿参数的基础模型和一个为实际场
 ```python
 from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
 import torch
-model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm-chat-7b-v1_1', revision='v1.0.0')
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm-chat-7b', revision='v1.0.0')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, device_map="auto", trust_remote_code=True,torch_dtype=torch.float16)
 model = AutoModelForCausalLM.from_pretrained(model_dir,device_map="auto", trust_remote_code=True,torch_dtype=torch.float16)
 model = model.eval()
diff --git a/README.md b/README.md
index e509963..bd58a76 100644
--- a/README.md
+++ b/README.md
@@ -158,8 +158,8 @@ To load the InternLM 7B Chat model using Transformers, use the following code:

 ```python
 >>> from transformers import AutoTokenizer, AutoModelForCausalLM
->>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True)
->>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True).cuda()
+>>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
+>>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).cuda()
 >>> model = model.eval()
 >>> response, history = model.chat(tokenizer, "hello", history=[])
 >>> print(response)
@@ -182,7 +182,7 @@ To load the InternLM model using ModelScope, use the following code:
 ```python
 from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
 import torch
-model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm-chat-7b-v1_1', revision='v1.0.0')
+model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm-chat-7b', revision='v1.0.0')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, device_map="auto", trust_remote_code=True,torch_dtype=torch.float16)
 model = AutoModelForCausalLM.from_pretrained(model_dir,device_map="auto", trust_remote_code=True,torch_dtype=torch.float16)
 model = model.eval()