From 32e7f99416c846402d6098419777edee3ddbce7b Mon Sep 17 00:00:00 2001
From: Xuanlei Zhao <43881818+oahzxl@users.noreply.github.com>
Date: Mon, 18 Sep 2023 09:44:27 +0800
Subject: [PATCH] [kernel] update triton init #4740 (#4740)

---
 colossalai/kernel/triton/__init__.py | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/colossalai/kernel/triton/__init__.py b/colossalai/kernel/triton/__init__.py
index 5840ad291..75812db03 100644
--- a/colossalai/kernel/triton/__init__.py
+++ b/colossalai/kernel/triton/__init__.py
@@ -1,12 +1,20 @@
-from .context_attention import bloom_context_attn_fwd, llama_context_attn_fwd
-from .copy_kv_cache_dest import copy_kv_cache_to_dest
-from .fused_layernorm import layer_norm
-from .rms_norm import rmsnorm_forward
-from .rotary_embedding_kernel import rotary_embedding_fwd
-from .softmax import softmax
-from .token_attention_kernel import token_attention_fwd
+try:
+    import triton
+    HAS_TRITON = True
 
-__all__ = [
-    "llama_context_attn_fwd", "bloom_context_attn_fwd", "softmax", "layer_norm", "rmsnorm_forward",
-    "copy_kv_cache_to_dest", "rotary_embedding_fwd", "token_attention_fwd"
-]
+    from .context_attention import bloom_context_attn_fwd, llama_context_attn_fwd
+    from .copy_kv_cache_dest import copy_kv_cache_to_dest
+    from .fused_layernorm import layer_norm
+    from .rms_norm import rmsnorm_forward
+    from .rotary_embedding_kernel import rotary_embedding_fwd
+    from .softmax import softmax
+    from .token_attention_kernel import token_attention_fwd
+
+    __all__ = [
+        "llama_context_attn_fwd", "bloom_context_attn_fwd", "softmax", "layer_norm", "rmsnorm_forward",
+        "copy_kv_cache_to_dest", "rotary_embedding_fwd", "token_attention_fwd"
+    ]
+
+except ImportError:
+    HAS_TRITON = False
+    print("Triton is not installed. Please install Triton to use Triton kernels.")
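
The patch above wraps the kernel imports in a try/except so that importing colossalai.kernel.triton no longer raises when Triton is not installed; callers can branch on the exported HAS_TRITON flag instead. Below is a minimal sketch of how downstream code might use that flag. The rms_norm wrapper and its PyTorch fallback are illustrative assumptions, and the argument order of rmsnorm_forward is assumed here, since the patch only shows that the name is exported:

import torch

from colossalai.kernel.triton import HAS_TRITON

if HAS_TRITON:
    # Exported by the package per the patch; signature assumed below.
    from colossalai.kernel.triton import rmsnorm_forward


def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    """Use the Triton kernel when available, else a pure-PyTorch fallback."""
    if HAS_TRITON:
        # Hypothetical call: the actual parameter order may differ.
        return rmsnorm_forward(x, weight, eps)
    # Reference RMSNorm in plain PyTorch as the fallback path.
    variance = x.pow(2).mean(dim=-1, keepdim=True)
    return weight * x * torch.rsqrt(variance + eps)

The design keeps CPU-only or Triton-less installs importable: the package degrades to a flag check at call sites rather than failing at import time.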