ColossalAI/op_builder/builder.py


import os
import re
from pathlib import Path
from typing import List


def get_cuda_cc_flag() -> List[str]:
    """Return the nvcc ``-gencode`` flags matching the CUDA architectures
    supported by the installed torch build (compute capability >= 6.0).
    """
    # Only import torch when needed: this avoids importing torch when building
    # on a machine without torch pre-installed, e.g. when building a wheel for
    # a PyPI release.
    import torch

    cc_flag = []
    for arch in torch.cuda.get_arch_list():
        res = re.search(r'sm_(\d+)', arch)
        if res:
            arch_cap = res[1]
            if int(arch_cap) >= 60:
                cc_flag.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])
    return cc_flag
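
# Illustrative output (an assumption for demonstration, not a guaranteed arch
# list): on a torch build whose arch list contains 'sm_70' and 'sm_80',
# get_cuda_cc_flag() would return
#   ['-gencode', 'arch=compute_70,code=sm_70',
#    '-gencode', 'arch=compute_80,code=sm_80']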


class Builder(object):

    def colossalai_src_path(self, code_path):
        current_file_path = Path(__file__)
        if os.path.islink(current_file_path.parent):
            # symbolic link
            return os.path.join(current_file_path.parent.parent.absolute(), code_path)
        else:
            return os.path.join(current_file_path.parent.parent.absolute(), "colossalai", "kernel", code_path)

    def get_cuda_home_include(self):
        """
        Return the include path inside CUDA_HOME.
        """
        from torch.utils.cpp_extension import CUDA_HOME
        if CUDA_HOME is None:
            raise RuntimeError("CUDA_HOME is None, please set CUDA_HOME to compile C++/CUDA kernels in ColossalAI.")
        cuda_include = os.path.join(CUDA_HOME, "include")
        return cuda_include
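    # For example (assuming a typical default CUDA install), with
    # CUDA_HOME=/usr/local/cuda this returns '/usr/local/cuda/include'.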

    # functions that must be overridden - begin
    def sources_files(self):
        raise NotImplementedError

    def include_dirs(self):
        raise NotImplementedError

    def cxx_flags(self):
        raise NotImplementedError

    def nvcc_flags(self):
        raise NotImplementedError

    # functions that must be overridden - end

    def strip_empty_entries(self, args):
        '''
        Drop any empty strings from the list of compile and link flags.
        '''
        return [x for x in args if len(x) > 0]

    def load(self, verbose=True):
        """
        Load and JIT-compile the op library at runtime.

        Args:
            verbose (bool, optional): show detailed info. Defaults to True.
        """
        import time

        from torch.utils.cpp_extension import load

        start_build = time.time()
        op_module = load(name=self.name,
                         sources=self.strip_empty_entries(self.sources_files()),
                         extra_include_paths=self.strip_empty_entries(self.include_dirs()),
                         extra_cflags=self.cxx_flags(),
                         extra_cuda_cflags=self.nvcc_flags(),
                         extra_ldflags=[],
                         verbose=verbose)
        build_duration = time.time() - start_build
        if verbose:
            print(f"Time to load {self.name} op: {build_duration} seconds")
        return op_module

    def builder(self, name) -> 'CUDAExtension':
        """
        Get a CUDAExtension instance used for setup.py.
        """
        from torch.utils.cpp_extension import CUDAExtension

        return CUDAExtension(
            name=name,
            sources=[os.path.join('colossalai/kernel/cuda_native/csrc', path) for path in self.sources_files()],
            include_dirs=self.include_dirs(),
            extra_compile_args={
                'cxx': self.cxx_flags(),
                'nvcc': self.nvcc_flags()
            })
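

# --- Usage sketch (illustrative only, not part of builder.py) ---
# A minimal hypothetical subclass showing how the Builder interface above is
# meant to be filled in. The op name, source file, and flags are assumptions
# chosen for demonstration, not real ColossalAI kernels.
class _ExampleOpBuilder(Builder):
    name = "example_op"

    def sources_files(self):
        # hypothetical kernel source, resolved relative to the package
        return [self.colossalai_src_path("cuda_native/csrc/example_op.cu")]

    def include_dirs(self):
        return [self.get_cuda_home_include()]

    def cxx_flags(self):
        return ["-O3"]

    def nvcc_flags(self):
        return ["-O3"] + get_cuda_cc_flag()

# JIT-compile and load at runtime:
#   module = _ExampleOpBuilder().load()
# or build a CUDAExtension for setup.py:
#   ext = _ExampleOpBuilder().builder("colossalai._C.example_op")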