diff --git a/.github/workflows/daily_tests.yaml b/.github/workflows/daily_tests.yaml
index 1e1376f..1a8285f 100644
--- a/.github/workflows/daily_tests.yaml
+++ b/.github/workflows/daily_tests.yaml
@@ -27,7 +27,7 @@ jobs:
         conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
         source activate internlm-model-latest
         pip install transformers==${{ matrix.transformers-version }}
-        pip install sentencepiece
+        pip install sentencepiece auto-gptq
         srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
         conda deactivate
     - name: load_latest_hf_model
@@ -36,7 +36,7 @@ jobs:
         conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
         source activate internlm-model-latest
         pip install transformers
-        pip install sentencepiece
+        pip install sentencepiece auto-gptq
         srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
         conda deactivate
     - name: remove_env
diff --git a/tests/bamboo.jpeg b/tests/bamboo.jpeg
new file mode 100644
index 0000000..7a495cb
Binary files /dev/null and b/tests/bamboo.jpeg differ
diff --git a/tests/image.webp b/tests/image.webp
new file mode 100644
index 0000000..2b62798
Binary files /dev/null and b/tests/image.webp differ
diff --git a/tests/panda.jpg b/tests/panda.jpg
new file mode 100644
index 0000000..b28136d
Binary files /dev/null and b/tests/panda.jpg differ
diff --git a/tests/test_hf_model.py b/tests/test_hf_model.py
index 2f7a06b..f8d30d3 100644
--- a/tests/test_hf_model.py
+++ b/tests/test_hf_model.py
@@ -1,6 +1,7 @@
 import pytest
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from PIL import Image
+from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
 
 prompts = ['你好', "what's your name"]
 
@@ -16,24 +17,34 @@ class TestChat:
     @pytest.mark.parametrize(
         'model_name',
         [
-            'internlm/internlm2-chat-7b',
-            'internlm/internlm2-chat-7b-sft',
+            'internlm/internlm2-chat-7b', 'internlm/internlm2-chat-7b-sft',
+            'internlm/internlm2-chat-1_8b', 'internlm/internlm2-chat-1_8b-sft'
         ],
     )
-    def test_demo_default(self, model_name):
+    @pytest.mark.parametrize(
+        'usefast',
+        [
+            True,
+            False,
+        ],
+    )
+    def test_demo_default(self, model_name, usefast):
         tokenizer = AutoTokenizer.from_pretrained(model_name,
-                                                  trust_remote_code=True)
+                                                  trust_remote_code=True,
+                                                  use_fast=usefast)
         # Set `torch_dtype=torch.float16` to load model in float16, otherwise
         # it will be loaded as float32 and might cause OOM Error.
         model = AutoModelForCausalLM.from_pretrained(
             model_name, torch_dtype=torch.float16,
             trust_remote_code=True).cuda()
         model = model.eval()
+        history = []
         for prompt in prompts:
-            response, history = model.chat(tokenizer, prompt, history=[])
+            response, history = model.chat(tokenizer, prompt, history=history)
             print(response)
             assert_model(response)
 
+        history = []
         for prompt in prompts:
             length = 0
             for response, history in model.stream_chat(tokenizer,
@@ -50,13 +61,21 @@ class TestBase:
     @pytest.mark.parametrize(
         'model_name',
         [
-            'internlm/internlm2-7b',
-            'internlm/internlm2-base-7b',
+            'internlm/internlm2-7b', 'internlm/internlm2-base-7b',
+            'internlm/internlm2-1_8b'
         ],
     )
-    def test_demo_default(self, model_name):
+    @pytest.mark.parametrize(
+        'usefast',
+        [
+            True,
+            False,
+        ],
+    )
+    def test_demo_default(self, model_name, usefast):
         tokenizer = AutoTokenizer.from_pretrained(model_name,
-                                                  trust_remote_code=True)
+                                                  trust_remote_code=True,
+                                                  use_fast=usefast)
         # Set `torch_dtype=torch.float16` to load model in float16, otherwise
         # it will be loaded as float32 and might cause OOM Error.
         model = AutoModelForCausalLM.from_pretrained(
@@ -78,3 +97,128 @@ class TestBase:
                                   skip_special_tokens=True)
             print(output)
             assert_model(output)
+
+
+class TestMath:
+    """Test cases for the math models."""
+
+    @pytest.mark.parametrize(
+        'model_name',
+        ['internlm/internlm2-math-7b', 'internlm/internlm2-math-base-7b'],
+    )
+    @pytest.mark.parametrize(
+        'usefast',
+        [
+            True,
+            False,
+        ],
+    )
+    def test_demo_default(self, model_name, usefast):
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True,
+                                                  use_fast=usefast)
+        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
+        # it will be loaded as float32 and might cause OOM Error.
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, trust_remote_code=True,
+            torch_dtype=torch.float16).cuda()
+        model = model.eval()
+        response, history = model.chat(tokenizer,
+                                       '1+1=',
+                                       history=[],
+                                       meta_instruction='')
+        print(response)
+        assert_model(response)
+        assert '2' in response
+
+
+class TestMMModel:
+    """Test cases for the multimodal model."""
+
+    @pytest.mark.parametrize(
+        'model_name',
+        [
+            'internlm/internlm-xcomposer2-7b',
+        ],
+    )
+    def test_demo_default(self, model_name):
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True)
+        # `torch_dtype=torch.float32` loads the model in full precision;
+        # pass `torch.float16` instead if GPU memory is limited.
+
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, torch_dtype=torch.float32,
+            trust_remote_code=True).cuda()
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True)
+
+        model = model.eval()
+        img_path_list = [
+            'tests/panda.jpg',
+            'tests/bamboo.jpeg',
+        ]
+        images = []
+        for img_path in img_path_list:
+            image = Image.open(img_path).convert('RGB')
+            image = model.vis_processor(image)
+            images.append(image)
+        image = torch.stack(images)
+        query = ' please write an article ' \
+            + 'based on the images. Title: my favorite animal.'
+        with torch.cuda.amp.autocast():
+            response, history = model.chat(tokenizer,
+                                           query=query,
+                                           image=image,
+                                           history=[],
+                                           do_sample=False)
+        print(response)
+        assert len(response) != 0
+        assert 'panda' in response
+
+        query = ' 请根据图片写一篇作文:我最喜欢的小动物。' \
+            + '要求:选准角度,确定立意,明确文体,自拟标题。'
+        with torch.cuda.amp.autocast():
+            response, history = model.chat(tokenizer,
+                                           query=query,
+                                           image=image,
+                                           history=[],
+                                           do_sample=False)
+        print(response)
+        assert len(response) != 0
+        assert '熊猫' in response
+
+
+class TestMMVlModel:
+    """Test cases for the multimodal VL model."""
+
+    @pytest.mark.parametrize(
+        'model_name',
+        [
+            'internlm/internlm-xcomposer2-vl-7b',
+        ],
+    )
+    def test_demo_default(self, model_name):
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True)
+
+        torch.set_grad_enabled(False)
+
+        # init model and tokenizer
+        model = AutoModel.from_pretrained(
+            model_name, trust_remote_code=True).cuda().eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                                  trust_remote_code=True)
+
+        query = 'Please describe this image in detail.'
+        image = 'tests/image.webp'
+        with torch.cuda.amp.autocast():
+            response, _ = model.chat(tokenizer,
+                                     query=query,
+                                     image=image,
+                                     history=[],
+                                     do_sample=False)
+        print(response)
+        assert len(response) != 0
+        assert 'Oscar Wilde' in response
+        assert 'Live life with no excuses, travel with no regret' in response
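
Note: the new test classes all report through the assert_model helper defined near the top of tests/test_hf_model.py; that helper is unchanged by this patch, so it does not appear in the diff. As a rough sketch only (the specific checks below are illustrative assumptions, not the repository's exact implementation), it validates a generated response along these lines:

def assert_model(response):
    # The generation must be non-empty and not just whitespace.
    assert len(response) != 0
    assert response.strip() != ''
    # Illustrative guard (assumed): reject outputs that leak chat-template
    # control markers instead of plain text.
    assert '<|im_end|>' not in response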