import os
import re
import subprocess
import warnings
from typing import List, Tuple


def print_rank_0(message: str) -> None:
    """
    Print on only one process to avoid spamming.
    """
    try:
        import torch.distributed as dist

        if not dist.is_initialized():
            is_main_rank = True
        else:
            is_main_rank = dist.get_rank() == 0
    except ImportError:
        is_main_rank = True

    if is_main_rank:
        print(message)
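
# Illustrative usage (a sketch, not part of the original module): in a
# distributed run, only rank 0 prints, so logs are not duplicated per process.
#
#   print_rank_0("[extension] Compiling fused optimizer kernels...")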


def get_cuda_version_in_pytorch() -> Tuple[str, str]:
    """
    This function returns the CUDA version of the PyTorch build.

    Returns:
        The CUDA version that PyTorch was built with, as a tuple of strings (major, minor).
    """
    import torch

    try:
        torch_cuda_major = torch.version.cuda.split(".")[0]
        torch_cuda_minor = torch.version.cuda.split(".")[1]
    except Exception:
        raise ValueError(
            "[extension] Cannot retrieve the CUDA version in the PyTorch binary given by torch.version.cuda"
        )
    return torch_cuda_major, torch_cuda_minor
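
# Illustrative usage (a sketch, not part of the original module), assuming a
# CUDA-enabled PyTorch build that reports torch.version.cuda == "11.8":
#
#   major, minor = get_cuda_version_in_pytorch()  # e.g. ("11", "8")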


def get_cuda_bare_metal_version(cuda_dir) -> Tuple[str, str]:
    """
    Get the system CUDA version from nvcc.

    Args:
        cuda_dir (str): the directory of the CUDA Toolkit.

    Returns:
        The CUDA version installed on the system, as a tuple of strings (major, minor).
    """
    if cuda_dir is None:
        raise ValueError(
            "[extension] The argument cuda_dir is None, but expected to be a string. Please make sure you have exported the environment variable CUDA_HOME correctly."
        )

    # check for nvcc path
    nvcc_path = os.path.join(cuda_dir, "bin/nvcc")
    if not os.path.exists(nvcc_path):
        raise FileNotFoundError(
            f"[extension] The nvcc compiler is not found in {nvcc_path}, please make sure you have set the correct value for CUDA_HOME."
        )

    # parse the `nvcc -V` output to obtain the system CUDA version
    raw_output = ""
    try:
        raw_output = subprocess.check_output([nvcc_path, "-V"], universal_newlines=True)
        output = raw_output.split()
        release_idx = output.index("release") + 1
        release = output[release_idx].split(".")
        bare_metal_major = release[0]
        bare_metal_minor = release[1][0]
    except Exception:
        raise ValueError(
            f"[extension] Failed to parse the nvcc output to obtain the system CUDA bare metal version. The output of 'nvcc -V' is\n{raw_output}"
        )
    return bare_metal_major, bare_metal_minor
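
# Illustrative usage (a sketch, not part of the original module), assuming the
# CUDA Toolkit is installed under /usr/local/cuda:
#
#   major, minor = get_cuda_bare_metal_version("/usr/local/cuda")  # e.g. ("11", "8")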


def check_system_pytorch_cuda_match(cuda_dir):
    """
    Check that the system CUDA version matches the CUDA version PyTorch was built with.
    The major versions must match exactly; a minor-version mismatch only triggers a warning.
    """
    bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
    torch_cuda_major, torch_cuda_minor = get_cuda_version_in_pytorch()

    if bare_metal_major != torch_cuda_major:
        raise Exception(
            f"[extension] Failed to build PyTorch extension because the detected CUDA version ({bare_metal_major}.{bare_metal_minor}) "
            f"mismatches the version that was used to compile PyTorch ({torch_cuda_major}.{torch_cuda_minor}). "
            "Please make sure you have set CUDA_HOME correctly and installed the matching PyTorch from https://pytorch.org/get-started/locally/ ."
        )

    if bare_metal_minor != torch_cuda_minor:
        warnings.warn(
            f"[extension] The CUDA version on the system ({bare_metal_major}.{bare_metal_minor}) does not match the version ({torch_cuda_major}.{torch_cuda_minor}) torch was compiled with. "
            "The mismatch is only in the minor version. As the APIs are compatible, we will allow compilation to proceed. "
            "If you encounter any issue when using the built kernel, please try building it again with fully matched CUDA versions."
        )
    return True
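
# Illustrative usage (a sketch, not part of the original module); CUDA_HOME may
# be None if no toolkit is found, in which case get_cuda_bare_metal_version raises:
#
#   from torch.utils.cpp_extension import CUDA_HOME
#   check_system_pytorch_cuda_match(CUDA_HOME)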


def get_pytorch_version() -> Tuple[int, int, int]:
    """
    This function returns the installed PyTorch version.

    Returns:
        A tuple of integers in the form of (major, minor, patch).
    """
    import torch

    torch_version = torch.__version__.split("+")[0]
    TORCH_MAJOR = int(torch_version.split(".")[0])
    TORCH_MINOR = int(torch_version.split(".")[1])
    # the patch field is parsed as base-16 so that pre-release patch strings
    # such as "0a0" do not crash the parser
    TORCH_PATCH = int(torch_version.split(".")[2], 16)
    return TORCH_MAJOR, TORCH_MINOR, TORCH_PATCH
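
# Illustrative usage (a sketch, not part of the original module), assuming
# torch.__version__ == "2.0.1+cu118":
#
#   get_pytorch_version()  # -> (2, 0, 1)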


def check_pytorch_version(min_major_version, min_minor_version) -> bool:
    """
    Compare the current PyTorch version with the minimum required version.

    Args:
        min_major_version (int): the minimum major version of PyTorch required
        min_minor_version (int): the minimum minor version of PyTorch required

    Returns:
        A boolean value. The value is True if the current PyTorch version is acceptable; otherwise a RuntimeError is raised.
    """
    # get pytorch version
    torch_major, torch_minor, _ = get_pytorch_version()

    # raise an error if the installed version is older than the minimum required version
    if torch_major < min_major_version or (torch_major == min_major_version and torch_minor < min_minor_version):
        raise RuntimeError(
            f"[extension] Colossal-AI requires PyTorch {min_major_version}.{min_minor_version} or newer.\n"
            "The latest stable release can be obtained from https://pytorch.org/get-started/locally/"
        )
    return True
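
# Illustrative usage (a sketch, not part of the original module): requiring at
# least PyTorch 1.10 and raising a RuntimeError on anything older.
#
#   check_pytorch_version(min_major_version=1, min_minor_version=10)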


def check_cuda_availability():
    """
    Check if CUDA is available on the system.

    Returns:
        A boolean value. True if CUDA is available and False otherwise.
    """
    import torch

    return torch.cuda.is_available()


def set_cuda_arch_list(cuda_dir):
    """
    This function sets the PyTorch TORCH_CUDA_ARCH_LIST variable for ahead-of-time extension compilation.
    Ahead-of-time compilation occurs when CUDA_EXT=1 is set when running 'pip install'.
    """
    cuda_available = check_cuda_availability()

    # we only need to set this when CUDA is not available for cross-compilation
    if not cuda_available:
        warnings.warn(
            "\n[extension] PyTorch did not find available GPUs on this system.\n"
            "If your intention is to cross-compile, this is not an error.\n"
            "By default, Colossal-AI will cross-compile for\n"
            "1. Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
            "2. Volta (compute capability 7.0),\n"
            "3. Turing (compute capability 7.5),\n"
            "4. Ampere (compute capabilities 8.0, 8.6) if the CUDA version is >= 11.0\n"
            "\nIf you wish to cross-compile for a single specific architecture,\n"
            'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n'
        )

        if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
            bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
            arch_list = ["6.0", "6.1", "6.2", "7.0", "7.5"]

            if int(bare_metal_major) == 11:
                if int(bare_metal_minor) == 0:
                    arch_list.append("8.0")
                else:
                    arch_list.append("8.0")
                    arch_list.append("8.6")

            arch_list_str = ";".join(arch_list)
            os.environ["TORCH_CUDA_ARCH_LIST"] = arch_list_str
        return False
    return True
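
# Illustrative usage (a sketch, not part of the original module): on a GPU-less
# build machine with CUDA_HOME set, this pre-populates TORCH_CUDA_ARCH_LIST so
# that cross-compilation targets a fixed set of architectures.
#
#   from torch.utils.cpp_extension import CUDA_HOME
#   set_cuda_arch_list(CUDA_HOME)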


def get_cuda_cc_flag() -> List[str]:
    """
    This function produces the nvcc -gencode flags for the GPU architectures supported by the current setup.

    Returns:
        The CUDA cc flags for compilation.
    """
    # only import torch when needed
    # this is to avoid importing torch when building on a machine without torch pre-installed
    # one case is to build wheel for pypi release
    import torch

    cc_flag = []
    # the highest compute capability of the visible GPU, e.g. (8, 0) -> "80"
    max_arch = "".join(str(i) for i in torch.cuda.get_device_capability())
    for arch in torch.cuda.get_arch_list():
        res = re.search(r"sm_(\d+)", arch)
        if res:
            arch_cap = res[1]
            # only emit flags for architectures from Pascal (sm_60) up to the detected GPU
            if int(arch_cap) >= 60 and int(arch_cap) <= int(max_arch):
                cc_flag.extend(["-gencode", f"arch=compute_{arch_cap},code={arch}"])
    return cc_flag
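
# Illustrative usage (a sketch, not part of the original module): on an A100
# (compute capability 8.0), the returned flags would include entries such as
# ["-gencode", "arch=compute_80,code=sm_80"].
#
#   nvcc_flags = ["-O3"] + get_cuda_cc_flag()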


def append_nvcc_threads(nvcc_extra_args: List[str]) -> List[str]:
    """
    This function appends the threads flag to your nvcc args.

    Returns:
        The nvcc compilation flags including the threads flag.
    """
    from torch.utils.cpp_extension import CUDA_HOME

    bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
    # nvcc supports --threads for parallel compilation from CUDA 11.2 onwards
    if (int(bare_metal_major), int(bare_metal_minor)) >= (11, 2):
        return nvcc_extra_args + ["--threads", "4"]
    return nvcc_extra_args
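

# A minimal self-check (a sketch, not part of the original module): running this
# file directly prints the detected versions and the nvcc flags that would be
# used. It assumes PyTorch and, for the CUDA checks, a local CUDA Toolkit are installed.
if __name__ == "__main__":
    from torch.utils.cpp_extension import CUDA_HOME

    print_rank_0(f"[extension] PyTorch version: {get_pytorch_version()}")
    if check_cuda_availability() and CUDA_HOME is not None:
        check_system_pytorch_cuda_match(CUDA_HOME)
        print_rank_0(f"[extension] System CUDA version: {get_cuda_bare_metal_version(CUDA_HOME)}")
        print_rank_0(f"[extension] nvcc flags: {append_nvcc_threads(get_cuda_cc_flag())}")
    else:
        print_rank_0("[extension] CUDA is not available; skipping the CUDA toolkit checks.")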