Compress Pictures

pull/710/head
zhulin1 2024-02-27 15:10:23 +08:00
parent b073d840de
commit 4ca04ddcc3
4 changed files with 36 additions and 10 deletions

Binary image file (not shown): 16 KiB → 1.7 KiB

Binary image file (not shown): 44 KiB → 14 KiB

Binary image file (not shown): 239 KiB → 2.8 KiB
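The commit ships only the recompressed binaries; the tool and settings used are not part of the change. For reference, savings of this order (e.g. 239 KiB → 2.8 KiB) are typically obtained by re-encoding at a lower quality, as in this hypothetical Pillow sketch:

```python
from pathlib import Path

from PIL import Image  # Pillow; assumed tooling, not part of the commit


def compress_image(path: Path, quality: int = 70) -> None:
    """Re-encode an image in place at reduced quality to shrink the file."""
    with Image.open(path) as im:
        im.load()                       # force full decode before overwriting
        im.save(path, quality=quality)


for img in Path('tests').glob('*.webp'):  # e.g. tests/image.webp from the diff
    compress_image(img)
```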


@@ -9,6 +9,8 @@ prompts = ['你好', "what's your name"]
 def assert_model(response):
     assert len(response) != 0
     assert 'UNUSED_TOKEN' not in response
+    assert 'Mynameis' not in response
+    assert 'Iama' not in response


 class TestChat:
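A plausible reading of the two new checks: if a tokenizer regresses and drops spaces during detokenization, replies to the "what's your name" prompt fuse into 'Mynameis' / 'Iama'. A self-contained sketch of the strengthened helper (the sample call is hypothetical):

```python
def assert_model(response):
    """Shared sanity checks for generated text."""
    assert len(response) != 0                 # model said something
    assert 'UNUSED_TOKEN' not in response     # no reserved tokens leaked out
    assert 'Mynameis' not in response         # spaces survived detokenization
    assert 'Iama' not in response


assert_model('My name is InternLM, I am a helpful assistant.')  # passes
```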
@@ -21,9 +23,17 @@ class TestChat:
             'internlm/internlm2-chat-1_8b', 'internlm/internlm2-chat-1_8b-sft'
         ],
     )
-    def test_demo_default(self, model_name):
+    @pytest.mark.parametrize(
+        'usefast',
+        [
+            True,
+            False,
+        ],
+    )
+    def test_demo_default(self, model_name, usefast):
         tokenizer = AutoTokenizer.from_pretrained(model_name,
-                                                  trust_remote_code=True)
+                                                  trust_remote_code=True,
+                                                  use_fast=usefast)
         # Set `torch_dtype=torch.float16` to load model in float16, otherwise
         # it will be loaded as float32 and might cause OOM Error.
         model = AutoModelForCausalLM.from_pretrained(
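Stacked @pytest.mark.parametrize decorators run the cross product of their arguments, so each model is now exercised with both tokenizer backends (use_fast=True selects transformers' Rust-based fast tokenizer, use_fast=False the slow Python one). A minimal runnable sketch of the pattern, with placeholder model names:

```python
import pytest


@pytest.mark.parametrize('model_name', ['model-a', 'model-b'])  # placeholders
@pytest.mark.parametrize('usefast', [True, False])
def test_matrix(model_name, usefast):
    # pytest generates four cases: (model-a, True), (model-a, False),
    # (model-b, True), (model-b, False).
    assert isinstance(usefast, bool)
```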
@@ -57,9 +67,17 @@ class TestBase:
             'internlm/internlm2-1_8b'
         ],
     )
-    def test_demo_default(self, model_name):
+    @pytest.mark.parametrize(
+        'usefast',
+        [
+            True,
+            False,
+        ],
+    )
+    def test_demo_default(self, model_name, usefast):
         tokenizer = AutoTokenizer.from_pretrained(model_name,
-                                                  trust_remote_code=True)
+                                                  trust_remote_code=True,
+                                                  use_fast=usefast)
         # Set `torch_dtype=torch.float16` to load model in float16, otherwise
         # it will be loaded as float32 and might cause OOM Error.
         model = AutoModelForCausalLM.from_pretrained(
@@ -90,9 +108,17 @@ class TestMath:
         'model_name',
         ['internlm/internlm2-math-7b', 'internlm/internlm2-math-base-7b'],
     )
-    def test_demo_default(self, model_name):
+    @pytest.mark.parametrize(
+        'usefast',
+        [
+            True,
+            False,
+        ],
+    )
+    def test_demo_default(self, model_name, usefast):
         tokenizer = AutoTokenizer.from_pretrained(model_name,
-                                                  trust_remote_code=True)
+                                                  trust_remote_code=True,
+                                                  use_fast=usefast)
         # Set `torch_dtype=torch.float16` to load model in float16, otherwise
         # it will be loaded as float32 and might cause OOM Error.
         model = AutoModelForCausalLM.from_pretrained(
@@ -106,6 +132,7 @@ class TestMath:
                                        meta_instruction='')
         print(response)
         assert_model(response)
+        assert '2' in response


 class TestMMModel:
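Beyond the generic assert_model checks, the math test now also requires the digit '2' in the decoded answer; the prompt itself lies outside this hunk, so this reads like a fixed-arithmetic smoke check (e.g. an answer to something like '1+1='). A trivial sketch of the idea, with a hypothetical response:

```python
def assert_math_answer(response, expected='2'):
    # Substring matching tolerates phrasing ("2", "1+1=2",
    # "The answer is 2.") while still failing on wrong output.
    assert expected in response


assert_math_answer('The answer is 2.')  # passes
```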
@@ -122,12 +149,12 @@ class TestMMModel:
                                                   trust_remote_code=True)
         # Set `torch_dtype=torch.float16` to load model in float16, otherwise
         # it will be loaded as float32 and might cause OOM Error.
         model = AutoModelForCausalLM.from_pretrained(
             model_name, torch_dtype=torch.float32,
             trust_remote_code=True).cuda()
         tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                   trust_remote_code=True)
         model = model.eval()

         img_path_list = [
@@ -184,7 +211,7 @@ class TestMMVlModel:
         model = AutoModel.from_pretrained(
             model_name, trust_remote_code=True).cuda().eval()
         tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                   trust_remote_code=True)

         query = '<ImageHere>Please describe this image in detail.'
         image = 'tests/image.webp'
@@ -198,4 +225,3 @@ class TestMMVlModel:
         assert len(response) != 0
         assert 'Oscar Wilde' in response
         assert 'Live life with no excuses, travel with no regret' in response
-