diff --git a/extensions/README.md b/extensions/README.md
index 9914fbfbf..6f5feb55c 100644
--- a/extensions/README.md
+++ b/extensions/README.md
@@ -3,12 +3,12 @@
 ## 📌 Table of Contents
 
 - [🔌 Extensions](#-extensions)
-    - [📌 Table of Contents](#-table-of-contents)
-    - [📚 Introduction](#-introduction)
-    - [🪅 Design](#-design)
-    - [🛠 API Usage](#-api-usage)
-    - [🏗 Write a customized extension](#-write-a-customized-extension)
-    - [✏️ Acknowledgement](#️-acknowledgement)
+  - [📌 Table of Contents](#-table-of-contents)
+  - [📚 Introduction](#-introduction)
+  - [🪅 Design](#-design)
+  - [🛠 API Usage](#-api-usage)
+  - [🏗 Write a customized extension](#-write-a-customized-extension)
+  - [✏️ Acknowledgement](#️-acknowledgement)
 
 ## 📚 Introduction
 
@@ -46,12 +46,12 @@ kernel = CPUAdamLoader().load()
 
 - Case 2: Load a specific kernel
 
-This case applies if you are familar with the extensions available.
+This case applies if you are familiar with the extensions available.
 
 ```python
 from colossalai.kernel.kernel_loader import CPUAdamLoader
 
-# load the kernel by giving the kernal name
+# load the kernel by giving the kernel name
 kernel = CPUAdamLoader().load(ext_name="cpu_adam_arm")
 ```
 
diff --git a/extensions/cuda_extension.py b/extensions/cuda_extension.py
index 188e61b60..b5e8a285b 100644
--- a/extensions/cuda_extension.py
+++ b/extensions/cuda_extension.py
@@ -20,7 +20,7 @@ class _CudaExtension(_CppExtension):
     """
 
     def is_hardware_available(self) -> bool:
-        # cuda extension can only be built if cuda is availabe
+        # cuda extension can only be built if cuda is available
         try:
             import torch
 
diff --git a/extensions/flash_attention/flash_attention_dao_cuda.py b/extensions/flash_attention/flash_attention_dao_cuda.py
index 6e2a9a880..1b7f8ac47 100644
--- a/extensions/flash_attention/flash_attention_dao_cuda.py
+++ b/extensions/flash_attention/flash_attention_dao_cuda.py
@@ -6,7 +6,7 @@ class FlashAttentionDaoCudaExtension(_Extension):
         super().__init__(name="flash_attention_dao_cuda", support_aot=False, support_jit=False, priority=10)
 
     def is_hardware_available(self) -> bool:
-        # cuda extension can only be built if cuda is availabe
+        # cuda extension can only be built if cuda is available
         try:
             import torch
 
diff --git a/extensions/flash_attention/flash_attention_xformers_cuda.py b/extensions/flash_attention/flash_attention_xformers_cuda.py
index 737b8599f..27cd823de 100644
--- a/extensions/flash_attention/flash_attention_xformers_cuda.py
+++ b/extensions/flash_attention/flash_attention_xformers_cuda.py
@@ -6,7 +6,7 @@ class FlashAttentionXformersCudaExtension(_Extension):
         super().__init__(name="flash_attention_xformers_cuda", support_aot=False, support_jit=False)
 
     def is_hardware_available(self) -> bool:
-        # cuda extension can only be built if cuda is availabe
+        # cuda extension can only be built if cuda is available
         try:
             import torch
 
diff --git a/extensions/triton_extension.py b/extensions/triton_extension.py
index b3f61644e..9f0792f8c 100644
--- a/extensions/triton_extension.py
+++ b/extensions/triton_extension.py
@@ -8,7 +8,7 @@ class _TritonExtension(_Extension):
         super().__init__(name, support_aot=False, support_jit=True, priority=priority)
 
     def is_hardware_compatible(self) -> bool:
-        # cuda extension can only be built if cuda is availabe
+        # cuda extension can only be built if cuda is available
         try:
             import torch
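Each Python hunk above cuts off the patched `is_hardware_available` / `is_hardware_compatible` method right after `import torch`. For context on the comment being corrected, here is a minimal sketch of what such a hardware check plausibly looks like; the class name `_CudaExtensionSketch` and the exact return logic are illustrative assumptions, not the repository's actual implementation.

```python
class _CudaExtensionSketch:
    """Hypothetical stand-in for the extension classes patched above."""

    def is_hardware_available(self) -> bool:
        # cuda extension can only be built if cuda is available
        try:
            import torch

            # assumption: simply report whether the CUDA runtime is usable
            return torch.cuda.is_available()
        except ImportError:
            # without torch there is no way to build or run the CUDA extension
            return False
```

A loader such as `CPUAdamLoader` from the README presumably consults checks like this to pick which kernel implementation to build or load, falling back to a lower-priority extension when the check returns `False`.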