# mirror of https://github.com/THUDM/ChatGLM2-6B
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" |
|
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task. |
|
""" |
|
import os
from typing import Optional

import torch
from transformers import Trainer
from transformers.modeling_utils import PreTrainedModel, unwrap_model
from transformers.utils import logging

logger = logging.get_logger(__name__)

WEIGHTS_NAME = "pytorch_model.bin"
TRAINING_ARGS_NAME = "training_args.bin"
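
# PrefixTrainer extends the Hugging Face Trainer with one switch: when
# `save_changed` is True, `_save` checkpoints only the parameters that still
# require gradients (the PrefixEncoder under P-tuning v2) instead of the full
# model weights.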
class PrefixTrainer(Trainer):
    def __init__(self, *args, save_changed=False, **kwargs):
        self.save_changed = save_changed
        super().__init__(*args, **kwargs)
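
    # Called by `Trainer.save_model()` and by checkpoint saving; runs on the
    # main process only.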
    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        # If we are executing this function, we are the process zero, so we don't check for that.
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Saving model checkpoint to {output_dir}")
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`.
        if not isinstance(self.model, PreTrainedModel):
            # The model may be wrapped (e.g. by DistributedDataParallel); save
            # the underlying `PreTrainedModel` if unwrapping reveals one.
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                if state_dict is None:
                    state_dict = self.model.state_dict()
                unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                if state_dict is None:
                    state_dict = self.model.state_dict()
                torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            if self.save_changed:
                # Save only the parameters that were actually trained: under
                # P-tuning v2 everything but the prefix encoder is frozen, so
                # filtering on `requires_grad` keeps just the PrefixEncoder weights.
                print("Saving PrefixEncoder")
                state_dict = self.model.state_dict()
                filtered_state_dict = {}
                for k, v in self.model.named_parameters():
                    if v.requires_grad:
                        filtered_state_dict[k] = state_dict[k]
                self.model.save_pretrained(output_dir, state_dict=filtered_state_dict)
            else:
                print("Saving the whole model")
                self.model.save_pretrained(output_dir, state_dict=state_dict)
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
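

# --- Usage sketch (illustrative; not part of the original file) ---
# A minimal, hypothetical wiring of PrefixTrainer for P-tuning v2. The model
# name, `pre_seq_len` value, and training arguments below are placeholder
# assumptions; in the ChatGLM2-6B ptuning scripts, setting `pre_seq_len` on
# the config is what freezes the backbone and adds the trainable PrefixEncoder.
if __name__ == "__main__":
    from transformers import AutoConfig, AutoModel, AutoTokenizer, TrainingArguments

    model_name = "THUDM/chatglm2-6b"  # placeholder checkpoint
    config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    config.pre_seq_len = 128  # hypothetical prefix length
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModel.from_pretrained(model_name, config=config, trust_remote_code=True)

    trainer = PrefixTrainer(
        model=model,
        args=TrainingArguments(output_dir="output/ptuning-demo"),
        tokenizer=tokenizer,
        save_changed=True,  # checkpoint only the trainable prefix weights
    )
    # `save_model()` routes through `_save` above; with `save_changed=True` it
    # writes just the PrefixEncoder parameters (normally after `trainer.train()`).
    trainer.save_model()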