# mirror of https://github.com/hpcaitech/ColossalAI
from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from coati.models.generation import generate
from coati.models.utils import log_probs_from_logits
from peft import PeftModel
from torch.nn.modules import Module
from transformers import BloomConfig, BloomForCausalLM


class Actor(Module):
    """
    Actor model base class.

    Args:
        model (nn.Module): Actor Model.
    """

    def __init__(self, model: nn.Module) -> None:
        super().__init__()
        self.model = model

    @torch.no_grad()
    def generate(
        self, input_ids: torch.Tensor, return_action_mask: bool = True, **kwargs
    ) -> Tuple[torch.LongTensor, Optional[torch.LongTensor], Optional[torch.BoolTensor]]:
        sequences = generate(self.model, input_ids, **kwargs)
        attention_mask = None
        pad_token_id = kwargs.get("pad_token_id", None)
        if pad_token_id is not None:
            # Positions equal to the pad id are excluded from the attention mask.
            attention_mask = sequences.not_equal(pad_token_id).to(dtype=torch.long, device=sequences.device)
        if not return_action_mask:
            return sequences, attention_mask, None
        input_len = input_ids.size(1)
        eos_token_id = kwargs.get("eos_token_id", None)
        if eos_token_id is None:
            action_mask = torch.ones_like(sequences, dtype=torch.bool)
        else:
            # left padding may be applied, only mask action
            action_mask = (sequences[:, input_len:] == eos_token_id).cumsum(dim=-1) == 0
            action_mask = F.pad(action_mask, (1 + input_len, -1), value=True)  # include eos token and input
        action_mask[:, :input_len] = False
        action_mask = action_mask[:, 1:]
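        # Hand-worked illustration (added for clarity, not executed): with
        # input_len = 2, eos_token_id = 2, pad_token_id = 0 and one generated row
        # [p, p, 5, 7, 2, 0], the cumsum keeps response positions strictly before
        # the first EOS, the F.pad shift re-includes the EOS itself and the prompt,
        # the prompt is then zeroed out, and the slice returned below gives
        # [True, True, True, False] for the four response tokens: content tokens
        # and the EOS count as actions, padding after the EOS does not.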
        return sequences, attention_mask, action_mask[:, -(sequences.size(1) - input_len) :]

    def forward(
        self, sequences: torch.LongTensor, num_actions: int, attention_mask: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Returns action log probs"""
        output = self.model(sequences, attention_mask=attention_mask)
        logits = output["logits"]
        log_probs = log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:])
        return log_probs[:, -num_actions:]

    def get_base_model(self):
        return self.model


class BLOOMActor(Actor):
    """
    BLOOM Actor model.

    Args:
        pretrained (str): Pretrained model name or path.
        config (BloomConfig): Model config.
        checkpoint (bool): Enable gradient checkpointing.
        lora_path (str): Path to a saved LoRA (peft) adapter to load, if any.
    """

    def __init__(
        self,
        pretrained: Optional[str] = None,
        config: Optional[BloomConfig] = None,
        checkpoint: bool = False,
        lora_path: Optional[str] = None,
    ) -> None:
        # Build the backbone: prefer a pretrained checkpoint, then an explicit
        # config, and fall back to the default BloomConfig.
        if pretrained is not None:
            model = BloomForCausalLM.from_pretrained(pretrained)
        elif config is not None:
            model = BloomForCausalLM(config)
        else:
            model = BloomForCausalLM(BloomConfig())
        # Optionally wrap the backbone with a saved peft (LoRA) adapter.
        if lora_path is not None:
            model = PeftModel.from_pretrained(model, lora_path)
        if checkpoint:
            model.gradient_checkpointing_enable()
        super().__init__(model)

    def print_trainable_parameters(self):
        # Assumes the wrapped model exposes print_trainable_parameters(), as
        # peft's PeftModel does (i.e. when lora_path was given).
        self.get_base_model().print_trainable_parameters()
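

# A minimal usage sketch (illustration only, not part of the original module).
# It assumes a locally cached "bigscience/bloom-560m" checkpoint and tokenizer;
# the generation keyword arguments shown here are simply forwarded to
# coati.models.generation.generate and are assumed to be supported by it.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
    actor = BLOOMActor(pretrained="bigscience/bloom-560m")

    input_ids = tokenizer("What is reinforcement learning?", return_tensors="pt").input_ids
    sequences, attention_mask, action_mask = actor.generate(
        input_ids,
        max_length=64,
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )

    # Log-probabilities of the generated (action) tokens, e.g. for PPO updates.
    action_log_probs = actor(sequences, action_mask.size(1), attention_mask)
    print(sequences.shape, action_log_probs.shape)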