diff --git a/README-zh-Hans.md b/README-zh-Hans.md
index c374344..2281d7d 100644
--- a/README-zh-Hans.md
+++ b/README-zh-Hans.md
@@ -86,7 +86,7 @@ InternLM ,即书生·浦语大模型,包含面向实用场景的70亿参数
 ```python
 >>> from transformers import AutoTokenizer, AutoModelForCausalLM
 >>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
->>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", torch_dtype=torch.float16, trust_remote_code=True).cuda()
+>>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).cuda()
 >>> model = model.eval()
 >>> response, history = model.chat(tokenizer, "你好", history=[])
 >>> print(response)
diff --git a/README.md b/README.md
index f1fa219..c66a0ae 100644
--- a/README.md
+++ b/README.md
@@ -90,7 +90,7 @@ To load the InternLM 7B Chat model using Transformers, use the following code:
 ```python
 >>> from transformers import AutoTokenizer, AutoModelForCausalLM
 >>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
->>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", torch_dtype=torch.float16, trust_remote_code=True).cuda()
+>>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).cuda()
 >>> model = model.eval()
 >>> response, history = model.chat(tokenizer, "hello", history=[])
 >>> print(response)
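Both hunks drop `torch_dtype=torch.float16` from the `from_pretrained` call: the surrounding snippets never `import torch`, so the argument would raise a `NameError` when pasted as-is. An alternative fix, not part of this patch, would be to keep half-precision loading and add the missing import instead. A minimal sketch of that variant:

```python
>>> import torch  # needed so torch.float16 resolves; absent from the original snippet
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
>>> # Load weights in half precision before moving the model to the GPU
>>> model = AutoModelForCausalLM.from_pretrained(
...     "internlm/internlm-chat-7b", torch_dtype=torch.float16, trust_remote_code=True
... ).cuda()
>>> model = model.eval()
```

Note the trade-off: without `torch_dtype`, Transformers loads the weights in float32 by default, which roughly doubles the GPU memory footprint of a 7B model compared to float16.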