From 1a368afd267f67a2d0d18e611827deb15a7544b1 Mon Sep 17 00:00:00 2001
From: rainatam
Date: Wed, 12 Apr 2023 16:43:34 +0800
Subject: [PATCH] Update README

---
 ptuning/README.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/ptuning/README.md b/ptuning/README.md
index 5c41771..9f4d0a0 100644
--- a/ptuning/README.md
+++ b/ptuning/README.md
@@ -141,7 +141,7 @@ from transformers import AutoConfig, AutoModel, AutoTokenizer
 # Load model and tokenizer of ChatGLM-6B
 config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128)
 tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True).half().cuda()
+model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True)
 
 # Load PrefixEncoder
 prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin"))
@@ -150,6 +150,10 @@ for k, v in prefix_state_dict.items():
     new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
 model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
 
+print(f"Quantized to 4 bit")
+model = model.quantize(4)
+model = model.half().cuda()
+model.transformer.prefix_encoder.float()
 model = model.eval()
 
 response, history = model.chat(tokenizer, "你好", history=[])
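
For reference, after applying this patch the inference example in ptuning/README.md would read roughly as follows. This is a sketch assembled from the hunk context above, not the full README section: the `import os` / `import torch` lines, the `new_prefix_state_dict = {}` initialization, and the `CHECKPOINT_PATH` value sit outside the hunks shown and are assumptions.

```python
import os
import torch
from transformers import AutoConfig, AutoModel, AutoTokenizer

CHECKPOINT_PATH = "output/checkpoint-3000"  # hypothetical path to a P-Tuning checkpoint

# Load model and tokenizer of ChatGLM-6B
config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128)
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True)

# Load PrefixEncoder weights from the checkpoint, stripping the module prefix
# from each key so they match the PrefixEncoder submodule's state dict
prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
    new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)

# Quantize to 4 bit, move the model to GPU in fp16, then cast the prefix
# encoder back to fp32
print("Quantized to 4 bit")
model = model.quantize(4)
model = model.half().cuda()
model.transformer.prefix_encoder.float()
model = model.eval()

response, history = model.chat(tokenizer, "你好", history=[])
```

The ordering is the point of the patch: `.half().cuda()` moves off the `from_pretrained` call so that `quantize(4)` runs on the loaded weights first, and the final `model.transformer.prefix_encoder.float()` presumably keeps the trained prefix embeddings in full precision rather than subjecting them to fp16/4-bit rounding.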