mirror of https://github.com/InternLM/InternLM
parent e7ba85e707
commit bd57ff3ce7
@@ -27,7 +27,7 @@ jobs:
       conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
       source activate internlm-model-latest
       pip install transformers==${{ matrix.transformers-version }}
-      pip install sentencepiece
+      pip install sentencepiece auto-gptq
       srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
       conda deactivate
   - name: load_latest_hf_model
@@ -36,7 +36,7 @@ jobs:
       conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
       source activate internlm-model-latest
       pip install transformers
-      pip install sentencepiece
+      pip install sentencepiece auto-gptq
       srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
       conda deactivate
   - name: remove_env
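The only functional change in both workflow steps is installing auto-gptq alongside sentencepiece, presumably so the environment can also load GPTQ-quantized checkpoints. As a hedged illustration only (the quantized repo id below is hypothetical and this call does not appear in the commit), loading such a checkpoint with the auto_gptq API looks roughly like this:

# Sketch only: assumes a GPTQ-quantized InternLM checkpoint exists at this
# hypothetical repo id; the tests in this commit do not show this call.
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

repo = 'internlm/internlm2-chat-7b-gptq'  # hypothetical name, for illustration
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoGPTQForCausalLM.from_quantized(repo,
                                           device='cuda:0',
                                           trust_remote_code=True)
model = model.eval()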
Three binary image files added (not shown): 1.7 KiB, 14 KiB, and 2.8 KiB — presumably the test images referenced by the new multimodal tests below.
tests/test_hf_model.py
@@ -1,6 +1,7 @@
 import pytest
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from PIL import Image
+from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer

 prompts = ['你好', "what's your name"]

@@ -16,24 +17,34 @@ class TestChat:
     @pytest.mark.parametrize(
         'model_name',
         [
-            'internlm/internlm2-chat-7b',
-            'internlm/internlm2-chat-7b-sft',
+            'internlm/internlm2-chat-7b', 'internlm/internlm2-chat-7b-sft',
+            'internlm/internlm2-chat-1_8b', 'internlm/internlm2-chat-1_8b-sft'
         ],
     )
-    def test_demo_default(self, model_name):
+    @pytest.mark.parametrize(
+        'usefast',
+        [
+            True,
+            False,
+        ],
+    )
+    def test_demo_default(self, model_name, usefast):
         tokenizer = AutoTokenizer.from_pretrained(model_name,
-                                                  trust_remote_code=True)
+                                                  trust_remote_code=True,
+                                                  use_fast=usefast)
         # Set `torch_dtype=torch.float16` to load model in float16, otherwise
         # it will be loaded as float32 and might cause OOM Error.
         model = AutoModelForCausalLM.from_pretrained(
             model_name, torch_dtype=torch.float16,
             trust_remote_code=True).cuda()
         model = model.eval()
+        history = []
         for prompt in prompts:
-            response, history = model.chat(tokenizer, prompt, history=[])
+            response, history = model.chat(tokenizer, prompt, history=history)
             print(response)
             assert_model(response)

+        history = []
         for prompt in prompts:
             length = 0
             for response, history in model.stream_chat(tokenizer,
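The streaming loop is cut off at the end of the hunk above. For context, InternLM's remote-code stream_chat is a generator that yields the accumulated response together with the updated history; a minimal sketch of how such a loop is typically consumed (only the call shape is taken from the diff, the keyword arguments and the slicing by length are illustrative):

# Sketch of consuming stream_chat: each yield carries the full response so far,
# so printing response[length:] emits only the newly generated text.
history = []
for prompt in prompts:
    length = 0
    for response, history in model.stream_chat(tokenizer, prompt, history=history):
        print(response[length:], end='', flush=True)
        length = len(response)
    print()
    assert_model(response)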
@@ -50,13 +61,21 @@ class TestBase:
     @pytest.mark.parametrize(
         'model_name',
         [
-            'internlm/internlm2-7b',
-            'internlm/internlm2-base-7b',
+            'internlm/internlm2-7b', 'internlm/internlm2-base-7b',
+            'internlm/internlm2-1_8b'
         ],
     )
-    def test_demo_default(self, model_name):
+    @pytest.mark.parametrize(
+        'usefast',
+        [
+            True,
+            False,
+        ],
+    )
+    def test_demo_default(self, model_name, usefast):
         tokenizer = AutoTokenizer.from_pretrained(model_name,
-                                                  trust_remote_code=True)
+                                                  trust_remote_code=True,
+                                                  use_fast=usefast)
         # Set `torch_dtype=torch.float16` to load model in float16, otherwise
         # it will be loaded as float32 and might cause OOM Error.
         model = AutoModelForCausalLM.from_pretrained(
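The body of the base-model test is elided between this hunk and the next; the context lines that follow (a decode with skip_special_tokens=True feeding print and assert_model) suggest a plain tokenize-generate-decode path. A rough sketch of that pattern, with an illustrative prompt and generation settings that are not taken from the commit:

# Illustrative only: the prompt text and generation parameters are assumptions,
# not the values used by the actual test.
inputs = tokenizer(['A beautiful flower'], return_tensors='pt')
inputs = {k: v.cuda() for k, v in inputs.items()}
output_ids = model.generate(**inputs,
                            max_new_tokens=64,
                            do_sample=True,
                            temperature=0.8,
                            top_p=0.8)
output = tokenizer.decode(output_ids[0].tolist(),
                          skip_special_tokens=True)
print(output)
assert_model(output)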
@@ -78,3 +97,129 @@ class TestBase:
                                   skip_special_tokens=True)
         print(output)
         assert_model(output)
+
+
+class TestMath:
+    """Test cases for base model."""
+
+    @pytest.mark.parametrize(
+        'model_name',
+        ['internlm/internlm2-math-7b', 'internlm/internlm2-math-base-7b'],
+    )
+    @pytest.mark.parametrize(
+        'usefast',
+        [
+            True,
+            False,
+        ],
+    )
+    def test_demo_default(self, model_name, usefast):
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True,
+                                                  use_fast=usefast)
+        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
+        # it will be loaded as float32 and might cause OOM Error.
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, trust_remote_code=True,
+            torch_dtype=torch.float16).cuda()
+        model = model.eval()
+        model = model.eval()
+        response, history = model.chat(tokenizer,
+                                       '1+1=',
+                                       history=[],
+                                       meta_instruction='')
+        print(response)
+        assert_model(response)
+        assert '2' in response
+
+
+class TestMMModel:
+    """Test cases for base model."""
+
+    @pytest.mark.parametrize(
+        'model_name',
+        [
+            'internlm/internlm-xcomposer2-7b',
+        ],
+    )
+    def test_demo_default(self, model_name):
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True)
+        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
+        # it will be loaded as float32 and might cause OOM Error.
+
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, torch_dtype=torch.float32,
+            trust_remote_code=True).cuda()
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True)
+
+        model = model.eval()
+        img_path_list = [
+            'tests/panda.jpg',
+            'tests/bamboo.jpeg',
+        ]
+        images = []
+        for img_path in img_path_list:
+            image = Image.open(img_path).convert('RGB')
+            image = model.vis_processor(image)
+            images.append(image)
+        image = torch.stack(images)
+        query = '<ImageHere> <ImageHere>please write an article ' \
+            + 'based on the images. Title: my favorite animal.'
+        with torch.cuda.amp.autocast():
+            response, history = model.chat(tokenizer,
+                                           query=query,
+                                           image=image,
+                                           history=[],
+                                           do_sample=False)
+        print(response)
+        assert len(response) != 0
+        assert 'panda' in response
+
+        query = '<ImageHere> <ImageHere>请根据图片写一篇作文：我最喜欢的小动物。' \
+            + '要求:选准角度,确定立意,明确文体,自拟标题。'
+        with torch.cuda.amp.autocast():
+            response, history = model.chat(tokenizer,
+                                           query=query,
+                                           image=image,
+                                           history=[],
+                                           do_sample=False)
+        print(response)
+        assert len(response) != 0
+        assert '熊猫' in response
+
+
+class TestMMVlModel:
+    """Test cases for base model."""
+
+    @pytest.mark.parametrize(
+        'model_name',
+        [
+            'internlm/internlm-xcomposer2-vl-7b',
+        ],
+    )
+    def test_demo_default(self, model_name):
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True)
+
+        torch.set_grad_enabled(False)
+
+        # init model and tokenizer
+        model = AutoModel.from_pretrained(
+            model_name, trust_remote_code=True).cuda().eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True)
+
+        query = '<ImageHere>Please describe this image in detail.'
+        image = 'tests/image.webp'
+        with torch.cuda.amp.autocast():
+            response, _ = model.chat(tokenizer,
+                                     query=query,
+                                     image=image,
+                                     history=[],
+                                     do_sample=False)
+        print(response)
+        assert len(response) != 0
+        assert 'Oscar Wilde' in response
+        assert 'Live life with no excuses, travel with no regret' in response
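One detail worth noting in the new TestMath case: chat() is called with meta_instruction='', which in InternLM's remote-code chat API drops the default system prompt. A small sketch of the same call shape with a custom instruction; the question and instruction strings are illustrative only and not part of the commit:

# Sketch only: reuses the chat() signature shown in the diff above; the
# instruction text and question are made up for illustration.
response, history = model.chat(
    tokenizer,
    'Compute 12 * 7.',
    history=[],
    meta_instruction='You are a careful math assistant; answer with just the number.')
print(response)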