@@ -28,6 +28,7 @@ from datasets import load_dataset
 import jieba
 from rouge_chinese import Rouge
 from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
+import torch
 
 import transformers
 from transformers import (
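The jieba, rouge_chinese and nltk imports above feed the Chinese ROUGE / BLEU-4 metrics computed during evaluation; torch is newly imported because the checkpoint loading added below calls torch.load. A minimal, self-contained sketch of how these libraries are typically combined to score one prediction against its reference (the function name and the rounding are illustrative, not part of the patch):

import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

def score_pair(prediction: str, reference: str) -> dict:
    # Segment both strings into words; rouge_chinese expects space-joined tokens.
    hypothesis = list(jieba.cut(prediction))
    reference_tokens = list(jieba.cut(reference))

    # Sentence-level ROUGE-1/2/L F-scores.
    scores = Rouge().get_scores(" ".join(hypothesis), " ".join(reference_tokens))[0]
    result = {name: round(value["f"] * 100, 4) for name, value in scores.items()}

    # Smoothed sentence-level BLEU-4 over the same token lists.
    result["bleu-4"] = round(
        sentence_bleu([reference_tokens], hypothesis,
                      smoothing_function=SmoothingFunction().method3) * 100, 4)
    return result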
@@ -110,13 +111,28 @@ def main():
 
     tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
 
-    model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
+    if model_args.ptuning_checkpoint is not None:
+        # Evaluation
+        # Loading extra state dict of prefix encoder
+        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
+        prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin"))
+        new_prefix_state_dict = {}
+        for k, v in prefix_state_dict.items():
+            new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
+        model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
+    else:
+        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
 
     if model_args.quantization_bit is not None:
         print(f"Quantized to {model_args.quantization_bit} bit")
         model = model.quantize(model_args.quantization_bit)
-    model = model.half()
-    model.transformer.prefix_encoder.float()
+    if model_args.pre_seq_len is not None:
+        # P-tuning v2
+        model = model.half()
+        model.transformer.prefix_encoder.float()
+    else:
+        # Finetune
+        model = model.float()
 
     prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
 
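The same prefix-encoder loading recipe can be reused outside the Trainer to run inference from a P-Tuning v2 checkpoint. A hedged sketch, assuming a ChatGLM-style model whose remote code provides chat() and a GPU is available, and that the checkpoint directory holds only the prefix-encoder weights saved during P-Tuning v2 training; the checkpoint path and pre_seq_len value are illustrative:

import os
import torch
from transformers import AutoConfig, AutoModel, AutoTokenizer

CHECKPOINT_PATH = "output/adgen-chatglm-6b-pt/checkpoint-3000"  # illustrative path

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True)

# The checkpoint stores the prefix encoder under the full model namespace;
# strip the "transformer.prefix_encoder." prefix before loading it into the submodule.
prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
    if k.startswith("transformer.prefix_encoder."):
        new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)

# Backbone in fp16, trained prefix encoder kept in fp32, as in the patch above.
model = model.half().cuda()
model.transformer.prefix_encoder.float()
model = model.eval()

response, history = model.chat(tokenizer, "你好", history=[])
print(response)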