From 92b15b180ddc0edfce2c82fb226e40dbbbe53a40 Mon Sep 17 00:00:00 2001
From: zhulinJulia24 <145004780+zhulinJulia24@users.noreply.github.com>
Date: Fri, 1 Mar 2024 09:10:06 +0800
Subject: [PATCH] add xcomposer 4bit and internlm2-chat-20b test case

---
 tests/test_hf_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_hf_model.py b/tests/test_hf_model.py
index ddee7b1..798d3a6 100644
--- a/tests/test_hf_model.py
+++ b/tests/test_hf_model.py
@@ -177,7 +177,7 @@ class TestMMModel:
                 model_name, trust_remote_code=True, device='cuda:0').eval()
         else:
             model = AutoModelForCausalLM.from_pretrained(
-                model_name, torch_dtype=torch.float16,
+                model_name, torch_dtype=torch.float32,
                 trust_remote_code=True).cuda()
         tokenizer = AutoTokenizer.from_pretrained(model_name,
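
For context, below is a minimal, self-contained sketch of the load path this hunk touches, assuming the standard transformers/torch APIs and a CUDA device; the model name is a placeholder taken from the commit subject and the generation call is illustrative, not the test's actual assertions.

    # Sketch of the non-multimodal branch after this patch (hypothetical model name).
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_name = 'internlm/internlm2-chat-20b'  # placeholder checkpoint

    # With this change the weights are loaded in float32 rather than float16
    # before the model is moved onto the GPU.
    model = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype=torch.float32,
        trust_remote_code=True).cuda()
    tokenizer = AutoTokenizer.from_pretrained(model_name,
                                              trust_remote_code=True)

    # Illustrative usage only: tokenize a prompt and generate a short reply.
    inputs = tokenizer('Hello', return_tensors='pt').to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=16)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))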