mirror of https://github.com/InternLM/InternLM
parent 716131e477
commit e1cefaef6b
@@ -84,8 +84,8 @@ Transformers を使用して InternLM 7B チャットモデルをロードする
 ```python
 >>> from transformers import AutoTokenizer, AutoModelForCausalLM
->>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b-v1.1", trust_remote_code=True)
->>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b-v1.1", trust_remote_code=True).cuda()
+>>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True)
+>>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True).cuda()
 >>> model = model.eval()
 >>> response, history = model.chat(tokenizer, "こんにちは", history=[])
 >>> print(response)
@@ -90,8 +90,8 @@ InternLM ,即书生·浦语大模型,包含面向实用场景的70亿参数
 ```python
 >>> from transformers import AutoTokenizer, AutoModelForCausalLM
->>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b-v1.1", trust_remote_code=True)
->>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b-v1.1", trust_remote_code=True).cuda()
+>>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True)
+>>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True).cuda()
 >>> model = model.eval()
 >>> response, history = model.chat(tokenizer, "你好", history=[])
 >>> print(response)
@@ -90,8 +90,8 @@ To load the InternLM 7B Chat model using Transformers, use the following code:
 ```python
 >>> from transformers import AutoTokenizer, AutoModelForCausalLM
->>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b-v1.1", trust_remote_code=True)
->>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b-v1.1", trust_remote_code=True).cuda()
+>>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True)
+>>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True).cuda()
 >>> model = model.eval()
 >>> response, history = model.chat(tokenizer, "hello", history=[])
 >>> print(response)
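All three hunks make the same one-character fix in the Japanese, Chinese, and English READMEs: the Hugging Face Hub repo id uses an underscore (`internlm-chat-7b-v1_1`), while the quoted snippets used a dot (`v1.1`), so they presumably pointed at a repo id that does not resolve on the Hub. For reference, here is the corrected snippet as it reads after this commit (English variant, with comments added); running it assumes a CUDA-capable GPU, and `trust_remote_code=True` is needed because `chat` is a helper defined in the model's remote code rather than in core `transformers`:

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> # Repo id must match the Hub exactly: "v1_1", not "v1.1"
>>> tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True)
>>> model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b-v1_1", trust_remote_code=True).cuda()
>>> model = model.eval()  # inference mode: disables dropout
>>> response, history = model.chat(tokenizer, "hello", history=[])  # chat helper shipped with the model repo
>>> print(response)
```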