@@ -28,6 +28,7 @@
 import jieba
 from rouge_chinese import Rouge
 from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
+import torch
 
 import transformers
 from transformers import (
@@ -110,13 +111,28 @@ def main():
 
     tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
 
-    model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
+    if model_args.ptuning_checkpoint is not None:
+        # Evaluation
+        # Loading extra state dict of prefix encoder
+        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
+        prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin"))
+        new_prefix_state_dict = {}
+        for k, v in prefix_state_dict.items():
+            new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
+        model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
+    else:
+        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
 
     if model_args.quantization_bit is not None:
         print(f"Quantized to {model_args.quantization_bit} bit")
         model = model.quantize(model_args.quantization_bit)
-    model = model.half()
-    model.transformer.prefix_encoder.float()
+    if model_args.pre_seq_len is not None:
+        # P-tuning v2
+        model = model.half()
+        model.transformer.prefix_encoder.float()
+    else:
+        # Finetune
+        model = model.float()
 
     prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
 
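Note on the prefix-encoder loading added above: a P-Tuning v2 checkpoint stores its tensors under the full module path (keys starting with "transformer.prefix_encoder."), so the loop strips that prefix before the state dict is loaded into model.transformer.prefix_encoder directly. A minimal, self-contained sketch of that key remapping follows; the tensor shape and the extra startswith guard are illustrative and not part of this commit.

import torch

# Stand-in for torch.load(os.path.join(ptuning_checkpoint, "pytorch_model.bin")):
# the checkpoint keeps prefix-encoder weights under their full module path.
prefix_state_dict = {
    "transformer.prefix_encoder.embedding.weight": torch.zeros(128, 4096),  # shape is illustrative
}

PREFIX = "transformer.prefix_encoder."
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
    if k.startswith(PREFIX):  # guard added here for safety; the commit strips every key unconditionally
        new_prefix_state_dict[k[len(PREFIX):]] = v

# Keys now match the PrefixEncoder's own parameter names, e.g. "embedding.weight",
# so model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) accepts them.
print(list(new_prefix_state_dict))  # ['embedding.weight']

Separately, the new pre_seq_len branch casts the base model to fp16 while keeping the prefix encoder in fp32 for P-Tuning v2; when pre_seq_len is not set, the whole model is cast to fp32 for the full-finetune path.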