[test] fix chatglm test kit (#5793)

pull/5797/head^2
Hongxin Liu authored 6 months ago, committed by GitHub
parent 74f4a29734
commit 587bbf4c6d

@@ -33,41 +33,40 @@ loss_fn_for_chatglm_model = lambda x: torch.nn.functional.mse_loss(
 )
 loss_fn = lambda x: x["loss"]
-config = AutoConfig.from_pretrained(
+infer_config = AutoConfig.from_pretrained(
     "THUDM/chatglm2-6b",
     trust_remote_code=True,
     num_layers=2,
     padded_vocab_size=65024,
-    hidden_size=64,
+    hidden_size=128,
+    ffn_hidden_size=214,
     num_attention_heads=8,
+    multi_query_attention=True,
+    multi_query_group_num=2,
     kv_channels=16,
     rmsnorm=True,
     original_rope=True,
     use_cache=True,
-    multi_query_attention=False,
     torch_dtype=torch.float32,
 )
-infer_config = AutoConfig.from_pretrained(
-    "THUDM/chatglm2-6b",
-    trust_remote_code=True,
-    num_layers=2,
-    padded_vocab_size=65024,
-    hidden_size=128,
-    num_attention_heads=8,
-    kv_channels=16,
-    rmsnorm=True,
-    original_rope=True,
-    use_cache=True,
-    multi_query_attention=False,
-    torch_dtype=torch.float32,
-)
 def init_chatglm():
+    config = AutoConfig.from_pretrained(
+        "THUDM/chatglm2-6b",
+        trust_remote_code=True,
+        num_layers=2,
+        padded_vocab_size=65024,
+        hidden_size=64,
+        ffn_hidden_size=214,
+        num_attention_heads=8,
+        multi_query_attention=True,
+        multi_query_group_num=2,
+        kv_channels=16,
+        rmsnorm=True,
+        original_rope=True,
+        use_cache=True,
+        torch_dtype=torch.float32,
+    )
     model = AutoModelForCausalLM.from_config(config, empty_init=False, trust_remote_code=True)
     for m in model.modules():
         if m.__class__.__name__ == "RMSNorm":
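
For a quick sanity check of the new tiny-model settings outside the shardformer test harness, the sketch below rebuilds the training-side config exactly as added in this commit and runs one forward pass on a fake batch. The fake batch shapes, the labels keyword, and reading the "loss" entry are assumptions about THUDM's remote ChatGLM2 modeling code rather than part of this commit; building the config and model via AutoConfig / AutoModelForCausalLM with trust_remote_code=True and empty_init=False is taken straight from the diff.

# Hypothetical sanity check for the tiny ChatGLM2 config used by the test kit.
# Fetches THUDM/chatglm2-6b's config and modeling code, so it needs network
# access and trust_remote_code=True on first run.
import torch
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained(
    "THUDM/chatglm2-6b",
    trust_remote_code=True,
    num_layers=2,
    padded_vocab_size=65024,
    hidden_size=64,
    ffn_hidden_size=214,
    num_attention_heads=8,
    multi_query_attention=True,
    multi_query_group_num=2,
    kv_channels=16,
    rmsnorm=True,
    original_rope=True,
    use_cache=True,
    torch_dtype=torch.float32,
)
model = AutoModelForCausalLM.from_config(config, empty_init=False, trust_remote_code=True)

# Fake batch; shapes are illustrative only, the real test kit supplies its own data_gen_fn.
input_ids = torch.randint(0, config.padded_vocab_size, (2, 8))
attention_mask = torch.ones_like(input_ids)

# Assumes the remote modeling code accepts labels and returns a dict-like output
# with a "loss" entry, which is what loss_fn = lambda x: x["loss"] picks up.
outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=input_ids)
print(outputs["loss"])

The inference-side infer_config in the diff differs only in hidden_size=128, so the same check applies to it unchanged.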
