# mirror of https://github.com/hpcaitech/ColossalAI
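"""ShardFormer inference policy for Llama: replaces the forwards of
LlamaModel, LlamaDecoderLayer, LlamaAttention, and LlamaRMSNorm with
inference-optimized implementations."""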

from functools import partial

import torch
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm

from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy

from ..modeling.llama import LlamaInferenceForwards, get_llama_vllm_rmsnorm_forward

try:
    from colossalai.kernel.triton import rmsnorm_forward

    HAS_TRITON_RMSNORM = True
except ImportError:
    # Fall back to the vLLM-based RMSNorm below if Triton is unavailable.
    print("you should install triton from https://github.com/openai/triton")
    HAS_TRITON_RMSNORM = False


def get_triton_rmsnorm_forward():
    """Return a Triton-backed replacement for LlamaRMSNorm.forward, or None if Triton is unavailable."""
    if HAS_TRITON_RMSNORM:

        def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):
            return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)

        return _triton_rmsnorm_forward
    else:
        return None


class LlamaModelInferPolicy(LlamaForCausalLMPolicy):

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        policy = super().module_policy()
        # Put the shard config into inference mode before swapping forwards.
        self.shard_config._infer()

        # Replace LlamaModel.forward with the inference-optimized version.
        infer_forward = LlamaInferenceForwards.llama_model_forward
        method_replacement = {'forward': partial(infer_forward)}
        self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)

        # Replace LlamaDecoderLayer.forward likewise.
        infer_forward = LlamaInferenceForwards.llama_decoder_layer_forward
        method_replacement = {'forward': partial(infer_forward)}
        self.append_or_create_method_replacement(description=method_replacement,
                                                 policy=policy,
                                                 target_key=LlamaDecoderLayer)

        # Replace LlamaAttention.forward with a flash-attention forward that
        # reads from and writes to the KV cache.
        infer_forward = LlamaInferenceForwards.llama_flash_attn_kvcache_forward
        method_replacement = {'forward': partial(infer_forward)}
        self.append_or_create_method_replacement(description=method_replacement,
                                                 policy=policy,
                                                 target_key=LlamaAttention)

        # Prefer the Triton RMSNorm kernel; otherwise fall back to vLLM's.
        if HAS_TRITON_RMSNORM:
            infer_forward = get_triton_rmsnorm_forward()
        else:
            # NOTE: adding rms_norm from the CUDA kernels caused a precision issue, fix @tiandiao123
            infer_forward = get_llama_vllm_rmsnorm_forward()

        if infer_forward is not None:
            method_replacement = {'forward': partial(infer_forward)}
            self.append_or_create_method_replacement(description=method_replacement,
                                                     policy=policy,
                                                     target_key=LlamaRMSNorm)

        return policy
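

# ---------------------------------------------------------------------------
# Minimal usage sketch. This module uses a relative import, so it must be
# imported as part of its package rather than run directly. A policy like
# this one is normally consumed by ColossalAI's ShardFormer engine; a rough
# sketch, assuming the base Policy exposes set_model/set_shard_config
# (the engine usually performs this wiring itself):
#
#     policy = LlamaModelInferPolicy()
#     policy.set_model(model)                 # assumed Policy API
#     policy.set_shard_config(shard_config)   # assumed Policy API
#     module_policy = policy.module_policy()  # per-module forward swaps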