try:
    import triton

    HAS_TRITON = True
except ImportError:
    HAS_TRITON = False
    print("Triton is not installed. Please install Triton to use Triton kernels.")

# An import error may still occur even if triton is installed.
if HAS_TRITON:
    from .context_attention import bloom_context_attn_fwd, llama_context_attn_fwd
    from .copy_kv_cache_dest import copy_kv_cache_to_dest
    from .fused_layernorm import layer_norm
    from .gptq_triton import gptq_fused_linear_triton
    from .rms_norm import rmsnorm_forward
    from .rotary_embedding_kernel import rotary_embedding_fwd
    from .softmax import softmax
    from .token_attention_kernel import token_attention_fwd

    __all__ = [
        "llama_context_attn_fwd",
        "bloom_context_attn_fwd",
        "softmax",
        "layer_norm",
        "rmsnorm_forward",
        "copy_kv_cache_to_dest",
        "rotary_embedding_fwd",
        "token_attention_fwd",
        "gptq_fused_linear_triton",
    ]
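
# Minimal usage sketch (illustrative only, not part of this module): downstream
# code is expected to check HAS_TRITON before importing the Triton kernels. The
# package path `colossalai.kernel.triton` is an assumption about where this
# file lives, and the fallback shown is hypothetical.
#
#   from colossalai.kernel.triton import HAS_TRITON
#
#   if HAS_TRITON:
#       from colossalai.kernel.triton import token_attention_fwd
#   else:
#       token_attention_fwd = None  # fall back to a non-Triton attention path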