import pytest
import torch
from PIL import Image
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer

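# Prompts shared by the test cases below; the first entry is Chinese for "hello".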
prompts = ['你好', "what's your name"]


def assert_model(response):
    """Basic sanity checks on a model response."""
    assert len(response) != 0
    assert 'UNUSED_TOKEN' not in response


class TestChat:
    """Test cases for chat model."""

    @pytest.mark.parametrize(
        'model_name',
        [
            'internlm/internlm2-chat-7b', 'internlm/internlm2-chat-7b-sft',
            'internlm/internlm2-chat-1_8b', 'internlm/internlm2-chat-1_8b-sft'
        ],
    )
    @pytest.mark.parametrize(
        'usefast',
        [
            True,
            False,
        ],
    )
    def test_demo_default(self, model_name, usefast):
        tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                  trust_remote_code=True,
                                                  use_fast=usefast)
        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
        # it will be loaded as float32 and might cause OOM Error.
        model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float16,
            trust_remote_code=True).cuda()
        model = model.eval()
        history = []
        for prompt in prompts:
            response, history = model.chat(tokenizer, prompt, history=history)
            print(response)
            assert_model(response)

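        # Exercise the streaming interface as well; only the newly generated
        # suffix is printed at each step.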
        history = []
        for prompt in prompts:
            length = 0
            for response, history in model.stream_chat(tokenizer,
                                                        prompt,
                                                        history=[]):
                print(response[length:], flush=True, end='')
                length = len(response)
            assert_model(response)


class TestBase:
    """Test cases for base model."""

    @pytest.mark.parametrize(
        'model_name',
        [
            'internlm/internlm2-7b', 'internlm/internlm2-base-7b',
            'internlm/internlm2-1_8b'
        ],
    )
    @pytest.mark.parametrize(
        'usefast',
        [
            True,
            False,
        ],
    )
    def test_demo_default(self, model_name, usefast):
        tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                  trust_remote_code=True,
                                                  use_fast=usefast)
        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
        # it will be loaded as float32 and might cause OOM Error.
        model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float16,
            trust_remote_code=True).cuda()
        for prompt in prompts:
            inputs = tokenizer(prompt, return_tensors='pt')
            for k, v in inputs.items():
                inputs[k] = v.cuda()
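            # Generation settings; `top_p` is a nucleus-sampling probability
            # and must lie in (0, 1].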
            gen_kwargs = {
                'max_length': 128,
                'top_p': 0.8,
                'temperature': 1.0,
                'do_sample': True,
                'repetition_penalty': 1.0,
            }
            output = model.generate(**inputs, **gen_kwargs)
            output = tokenizer.decode(output[0].tolist(),
                                      skip_special_tokens=True)
            print(output)
            assert_model(output)


class TestMath:
    """Test cases for math model."""

    @pytest.mark.parametrize(
        'model_name',
        ['internlm/internlm2-math-7b', 'internlm/internlm2-math-base-7b'],
    )
    @pytest.mark.parametrize(
        'usefast',
        [
            True,
            False,
        ],
    )
    def test_demo_default(self, model_name, usefast):
        tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                  trust_remote_code=True,
                                                  use_fast=usefast)
        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
        # it will be loaded as float32 and might cause OOM Error.
        model = AutoModelForCausalLM.from_pretrained(
            model_name, trust_remote_code=True,
            torch_dtype=torch.float16).cuda()
        model = model.eval()
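        # An empty `meta_instruction` disables the default system prompt.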
        response, history = model.chat(tokenizer,
                                       '1+1=',
                                       history=[],
                                       meta_instruction='')
        print(response)
        assert_model(response)
        assert '2' in response


class TestMMModel:
    """Test cases for multimodal model."""

    @pytest.mark.parametrize(
        'model_name',
        [
            'internlm/internlm-xcomposer2-7b',
        ],
    )
    def test_demo_default(self, model_name):
        tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                  trust_remote_code=True)
        # The model is loaded in float32 here; the chat calls below run under
        # `torch.cuda.amp.autocast()`, which casts the compute to float16.
        model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float32,
            trust_remote_code=True).cuda()

        model = model.eval()
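        # Preprocess the two local test images and stack them into one batch.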
        img_path_list = [
            'tests/panda.jpg',
            'tests/bamboo.jpeg',
        ]
        images = []
        for img_path in img_path_list:
            image = Image.open(img_path).convert('RGB')
            image = model.vis_processor(image)
            images.append(image)
        image = torch.stack(images)
        query = '<ImageHere> <ImageHere>please write an article ' \
            + 'based on the images. Title: my favorite animal.'
        with torch.cuda.amp.autocast():
            response, history = model.chat(tokenizer,
                                           query=query,
                                           image=image,
                                           history=[],
                                           do_sample=False)
        print(response)
        assert len(response) != 0
        assert 'panda' in response

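        # The same request in Chinese: write an essay titled "My favorite
        # animal" based on the images, choosing an angle, theme and genre.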
        query = '<ImageHere> <ImageHere>请根据图片写一篇作文：我最喜欢的小动物。' \
            + '要求：选准角度，确定立意，明确文体，自拟标题。'
        with torch.cuda.amp.autocast():
            response, history = model.chat(tokenizer,
                                           query=query,
                                           image=image,
                                           history=[],
                                           do_sample=False)
        print(response)
        assert len(response) != 0
        assert '熊猫' in response


class TestMMVlModel:
    """Test cases for multimodal VL model."""

    @pytest.mark.parametrize(
        'model_name',
        [
            'internlm/internlm-xcomposer2-vl-7b',
        ],
    )
    def test_demo_default(self, model_name):
        torch.set_grad_enabled(False)

        # init model and tokenizer
        model = AutoModel.from_pretrained(
            model_name, trust_remote_code=True).cuda().eval()
        tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                  trust_remote_code=True)

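        # The test image is expected to contain the Oscar Wilde travel quote
        # that the assertions below check for.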
        query = '<ImageHere>Please describe this image in detail.'
        image = 'tests/image.webp'
        with torch.cuda.amp.autocast():
            response, _ = model.chat(tokenizer,
                                     query=query,
                                     image=image,
                                     history=[],
                                     do_sample=False)
        print(response)
        assert len(response) != 0
        assert 'Oscar Wilde' in response
        assert 'Live life with no excuses, travel with no regret' in response