[setup] removed the build dependency on colossalai (#2307)

pull/2312/head
Frank Lee 2023-01-04 11:38:42 +08:00 committed by GitHub
parent d45695d94e
commit 9b765e7a69
9 changed files with 325 additions and 6 deletions

op_builder/__init__.py (new file, +7)

@@ -0,0 +1,7 @@
from .cpu_adam import CPUAdamBuilder
from .fused_optim import FusedOptimBuilder
from .moe import MOEBuilder
from .multi_head_attn import MultiHeadAttnBuilder
from .scaled_upper_triang_masked_softmax import ScaledSoftmaxBuilder
__all__ = ['CPUAdamBuilder', 'FusedOptimBuilder', 'MultiHeadAttnBuilder', 'ScaledSoftmaxBuilder', 'MOEBuilder']
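Because op_builder now lives at the repository root rather than inside the colossalai package, setup.py can import these builders before colossalai itself is installed. A minimal sketch (not from this commit; it only assumes torch is available, since op_builder.builder imports torch):

# Sketch: the builders are importable without the colossalai package.
from op_builder import CPUAdamBuilder, FusedOptimBuilder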

op_builder/builder.py (new file, +104)

@@ -0,0 +1,104 @@
import os
import re
from pathlib import Path
from typing import List
import torch
def get_cuda_cc_flag() -> List:
"""get_cuda_cc_flag
cc flag for your GPU arch
"""
cc_flag = []
for arch in torch.cuda.get_arch_list():
res = re.search(r'sm_(\d+)', arch)
if res:
arch_cap = res[1]
if int(arch_cap) >= 60:
cc_flag.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])
return cc_flag
class Builder(object):
def colossalai_src_path(self, code_path):
if os.path.isabs(code_path):
return code_path
else:
return os.path.join(Path(__file__).parent.parent.absolute(), code_path)
def get_cuda_home_include(self):
"""
return include path inside the cuda home.
"""
from torch.utils.cpp_extension import CUDA_HOME
if CUDA_HOME is None:
raise RuntimeError("CUDA_HOME is None, please set CUDA_HOME to compile C++/CUDA kernels in ColossalAI.")
cuda_include = os.path.join(CUDA_HOME, "include")
return cuda_include
    # functions that must be overridden - begin
def sources_files(self):
raise NotImplementedError
def include_dirs(self):
raise NotImplementedError
def cxx_flags(self):
raise NotImplementedError
def nvcc_flags(self):
raise NotImplementedError
    # functions that must be overridden - end
def strip_empty_entries(self, args):
'''
Drop any empty strings from the list of compile and link flags
'''
return [x for x in args if len(x) > 0]
def load(self, verbose=True):
"""
        Load and compile this op's kernel library at runtime.
Args:
verbose (bool, optional): show detailed info. Defaults to True.
"""
import time
from torch.utils.cpp_extension import load
start_build = time.time()
op_module = load(name=self.name,
sources=self.strip_empty_entries(self.sources_files()),
extra_include_paths=self.strip_empty_entries(self.include_dirs()),
extra_cflags=self.cxx_flags(),
extra_cuda_cflags=self.nvcc_flags(),
extra_ldflags=[],
verbose=verbose)
build_duration = time.time() - start_build
if verbose:
print(f"Time to load {self.name} op: {build_duration} seconds")
return op_module
def builder(self, name) -> 'CUDAExtension':
"""
get a CUDAExtension instance used for setup.py
"""
from torch.utils.cpp_extension import CUDAExtension
return CUDAExtension(
name=name,
sources=[os.path.join('colossalai/kernel/cuda_native/csrc', path) for path in self.sources_files()],
include_dirs=self.include_dirs(),
extra_compile_args={
'cxx': self.cxx_flags(),
'nvcc': self.nvcc_flags()
})
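As a minimal usage sketch (not from this commit), a concrete builder can JIT-compile and import its kernel at runtime through load(); this assumes CUDA_HOME points at a CUDA toolkit and a host C++ compiler is present:

# Sketch: runtime JIT path via torch.utils.cpp_extension.load().
# The first call compiles the sources; later calls reuse the cached build
# under TORCH_EXTENSIONS_DIR.
from op_builder import CPUAdamBuilder

cpu_adam = CPUAdamBuilder().load(verbose=True)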

op_builder/cpu_adam.py (new file, +42)

@@ -0,0 +1,42 @@
import os
from .builder import Builder
from .utils import append_nvcc_threads
class CPUAdamBuilder(Builder):
NAME = "cpu_adam"
BASE_DIR = "colossalai/kernel/cuda_native"
def __init__(self):
self.name = CPUAdamBuilder.NAME
super().__init__()
self.version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']
    # the four required override functions - begin
def sources_files(self):
ret = [
os.path.join(CPUAdamBuilder.BASE_DIR, "csrc/cpu_adam.cpp"),
]
return [self.colossalai_src_path(path) for path in ret]
def include_dirs(self):
return [
self.colossalai_src_path(os.path.join(CPUAdamBuilder.BASE_DIR, "includes")),
self.get_cuda_home_include()
]
def cxx_flags(self):
extra_cxx_flags = ['-std=c++14', '-lcudart', '-lcublas', '-g', '-Wno-reorder', '-fopenmp', '-march=native']
return ['-O3'] + self.version_dependent_macros + extra_cxx_flags
def nvcc_flags(self):
extra_cuda_flags = [
'-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__',
'-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK'
]
return append_nvcc_threads(['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags)
    # the four required override functions - end

op_builder/fused_optim.py (new file, +35)

@@ -0,0 +1,35 @@
import os
from .builder import Builder, get_cuda_cc_flag
class FusedOptimBuilder(Builder):
NAME = 'fused_optim'
BASE_DIR = "colossalai/kernel/cuda_native/csrc"
def __init__(self):
self.name = FusedOptimBuilder.NAME
super().__init__()
self.version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']
def sources_files(self):
ret = [
self.colossalai_src_path(os.path.join(FusedOptimBuilder.BASE_DIR, fname)) for fname in [
'colossal_C_frontend.cpp', 'multi_tensor_sgd_kernel.cu', 'multi_tensor_scale_kernel.cu',
'multi_tensor_adam.cu', 'multi_tensor_l2norm_kernel.cu', 'multi_tensor_lamb.cu'
]
]
return ret
def include_dirs(self):
ret = [os.path.join(FusedOptimBuilder.BASE_DIR, "includes"), self.get_cuda_home_include()]
return [self.colossalai_src_path(path) for path in ret]
def cxx_flags(self):
extra_cxx_flags = []
return ['-O3'] + self.version_dependent_macros + extra_cxx_flags
def nvcc_flags(self):
extra_cuda_flags = ['-lineinfo']
extra_cuda_flags.extend(get_cuda_cc_flag())
return ['-O3', '--use_fast_math'] + extra_cuda_flags

op_builder/moe.py (new file, +33)

@@ -0,0 +1,33 @@
import os
from .builder import Builder, get_cuda_cc_flag
class MOEBuilder(Builder):
def __init__(self):
self.base_dir = "colossalai/kernel/cuda_native/csrc"
self.name = 'moe'
super().__init__()
def include_dirs(self):
        ret = [os.path.join(self.base_dir, "includes"), self.get_cuda_home_include()]
ret.append(os.path.join(self.base_dir, "kernels", "include"))
return [self.colossalai_src_path(path) for path in ret]
def sources_files(self):
ret = [os.path.join(self.base_dir, fname) for fname in ['moe_cuda.cpp', 'moe_cuda_kernel.cu']]
return [self.colossalai_src_path(path) for path in ret]
def cxx_flags(self):
return ['-O3', '-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']
def nvcc_flags(self):
extra_cuda_flags = [
'-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '--expt-relaxed-constexpr',
'--expt-extended-lambda'
]
extra_cuda_flags.extend(get_cuda_cc_flag())
ret = ['-O3', '--use_fast_math'] + extra_cuda_flags
return ret

op_builder/multi_head_attn.py (new file, +41)

@@ -0,0 +1,41 @@
import os
from .builder import Builder, get_cuda_cc_flag
class MultiHeadAttnBuilder(Builder):
def __init__(self):
self.base_dir = "colossalai/kernel/cuda_native/csrc"
self.name = 'multihead_attention'
super().__init__()
self.version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']
def include_dirs(self):
        ret = [os.path.join(self.base_dir, "includes"), self.get_cuda_home_include()]
ret.append(os.path.join(self.base_dir, "kernels", "include"))
return [self.colossalai_src_path(path) for path in ret]
def sources_files(self):
ret = [
os.path.join(self.base_dir, fname) for fname in [
'multihead_attention_1d.cpp', 'kernels/cublas_wrappers.cu', 'kernels/transform_kernels.cu',
'kernels/dropout_kernels.cu', 'kernels/normalize_kernels.cu', 'kernels/softmax_kernels.cu',
'kernels/general_kernels.cu', 'kernels/cuda_util.cu'
]
]
return [self.colossalai_src_path(path) for path in ret]
def cxx_flags(self):
return ['-O3'] + self.version_dependent_macros
def nvcc_flags(self):
extra_cuda_flags = [
'-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__',
'-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK'
]
extra_cuda_flags.extend(get_cuda_cc_flag())
ret = ['-O3', '--use_fast_math'] + extra_cuda_flags
return ret

op_builder/scaled_upper_triang_masked_softmax.py (new file, +36)

@@ -0,0 +1,36 @@
import os
from .builder import Builder, get_cuda_cc_flag
class ScaledSoftmaxBuilder(Builder):
def __init__(self):
self.base_dir = "colossalai/kernel/cuda_native/csrc"
self.name = 'scaled_upper_triang_masked_softmax'
super().__init__()
def include_dirs(self):
        ret = [os.path.join(self.base_dir, "includes"), self.get_cuda_home_include()]
ret.append(os.path.join(self.base_dir, "kernels", "include"))
return [self.colossalai_src_path(path) for path in ret]
def sources_files(self):
ret = [
os.path.join(self.base_dir, fname)
for fname in ['scaled_upper_triang_masked_softmax.cpp', 'scaled_upper_triang_masked_softmax_cuda.cu']
]
return [self.colossalai_src_path(path) for path in ret]
def cxx_flags(self):
return ['-O3']
def nvcc_flags(self):
extra_cuda_flags = [
'-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '--expt-relaxed-constexpr',
'--expt-extended-lambda'
]
extra_cuda_flags.extend(get_cuda_cc_flag())
ret = ['-O3', '--use_fast_math'] + extra_cuda_flags
return ret

op_builder/utils.py (new file, +20)

@@ -0,0 +1,20 @@
import subprocess
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def append_nvcc_threads(nvcc_extra_args):
from torch.utils.cpp_extension import CUDA_HOME
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
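As a worked example (not from this commit), on a machine whose nvcc -V reports "Cuda compilation tools, release 11.3, V11.3.109" these helpers behave roughly as follows:

# Sketch: expected values for a CUDA 11.3 toolkit.
from torch.utils.cpp_extension import CUDA_HOME
from op_builder.utils import append_nvcc_threads, get_cuda_bare_metal_version

_, major, minor = get_cuda_bare_metal_version(CUDA_HOME)     # major == "11", minor == "3"
nvcc_args = append_nvcc_threads(['-O3', '--use_fast_math'])
# -> ['-O3', '--use_fast_math', '--threads', '4'] because 11.3 satisfies >= 11.2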

setup.py (modified)

@@ -3,7 +3,7 @@ import re
from setuptools import find_packages, setup
-from colossalai.kernel.op_builder.utils import get_cuda_bare_metal_version
+from op_builder.utils import get_cuda_bare_metal_version
try:
import torch
@@ -18,6 +18,7 @@ try:
except ImportError:
raise ModuleNotFoundError('torch is not found. You need to install PyTorch before installing Colossal-AI.')
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
build_cuda_ext = True
@@ -137,7 +138,7 @@ if build_cuda_ext:
})
#### fused optim kernels ###
-from colossalai.kernel.op_builder import FusedOptimBuilder
+from op_builder import FusedOptimBuilder
ext_modules.append(FusedOptimBuilder().builder('colossalai._C.fused_optim'))
#### N-D parallel kernels ###
@@ -154,14 +155,14 @@ if build_cuda_ext:
'--expt-extended-lambda'
]
-from colossalai.kernel.op_builder import ScaledSoftmaxBuilder
+from op_builder import ScaledSoftmaxBuilder
ext_modules.append(ScaledSoftmaxBuilder().builder('colossalai._C.scaled_upper_triang_masked_softmax'))
ext_modules.append(
cuda_ext_helper('colossalai._C.scaled_masked_softmax',
['scaled_masked_softmax.cpp', 'scaled_masked_softmax_cuda.cu'], extra_cuda_flags + cc_flag))
-from colossalai.kernel.op_builder import MOEBuilder
+from op_builder import MOEBuilder
ext_modules.append(MOEBuilder().builder('colossalai._C.moe'))
extra_cuda_flags = ['-maxrregcount=50']
@@ -171,11 +172,11 @@ if build_cuda_ext:
extra_cuda_flags + cc_flag))
### MultiHeadAttn Kernel ####
-from colossalai.kernel.op_builder import MultiHeadAttnBuilder
+from op_builder import MultiHeadAttnBuilder
ext_modules.append(MultiHeadAttnBuilder().builder('colossalai._C.multihead_attention'))
### Gemini Adam kernel ####
-from colossalai.kernel.op_builder import CPUAdamBuilder
+from op_builder import CPUAdamBuilder
ext_modules.append(CPUAdamBuilder().builder('colossalai._C.cpu_optim'))
setup(name='colossalai',
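For context, a pared-down sketch (not the actual setup.py) of how these CUDAExtension objects are handed to setuptools, using the standard BuildExtension command from torch.utils.cpp_extension:

# Sketch only: builders produce CUDAExtension objects for setuptools,
# and torch's BuildExtension compiles them during installation.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension

from op_builder import CPUAdamBuilder, FusedOptimBuilder

ext_modules = [
    FusedOptimBuilder().builder('colossalai._C.fused_optim'),
    CPUAdamBuilder().builder('colossalai._C.cpu_optim'),
]

setup(name='colossalai',
      ext_modules=ext_modules,
      cmdclass={'build_ext': BuildExtension})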