diff --git a/.github/workflows/compatiblity_test_on_dispatch.yml b/.github/workflows/compatiblity_test_on_dispatch.yml
index 717cf729b..3dcc4dfd1 100644
--- a/.github/workflows/compatiblity_test_on_dispatch.yml
+++ b/.github/workflows/compatiblity_test_on_dispatch.yml
@@ -19,26 +19,26 @@ jobs:
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
-      - id: set-matrix
-        env:
-          TORCH_VERSIONS: ${{ inputs.torch_version }}
-          CUDA_VERSIONS: ${{ inputs.cuda_version }}
-        run: |
-          IFS=','
-          DOCKER_IMAGE=()
+      - id: set-matrix
+        env:
+          TORCH_VERSIONS: ${{ inputs.torch_version }}
+          CUDA_VERSIONS: ${{ inputs.cuda_version }}
+        run: |
+          IFS=','
+          DOCKER_IMAGE=()
 
-          for tv in $TORCH_VERSIONS
-          do
-            for cv in $CUDA_VERSIONS
-            do
-              DOCKER_IMAGE+=("\"hpcaitech/pytorch-cuda:${tv}-${cv}\"")
-            done
-          done
+          for tv in $TORCH_VERSIONS
+          do
+            for cv in $CUDA_VERSIONS
+            do
+              DOCKER_IMAGE+=("\"hpcaitech/pytorch-cuda:${tv}-${cv}\"")
+            done
+          done
 
-          container=$( IFS=',' ; echo "${DOCKER_IMAGE[*]}" )
-          container="[${container}]"
-          echo "$container"
-          echo "::set-output name=matrix::{\"container\":$(echo "$container")}"
+          container=$( IFS=',' ; echo "${DOCKER_IMAGE[*]}" )
+          container="[${container}]"
+          echo "$container"
+          echo "::set-output name=matrix::{\"container\":$(echo "$container")}"
 
   build:
     name: Test for PyTorch Compatibility
@@ -70,6 +70,17 @@ jobs:
       - uses: actions/checkout@v2
         with:
           ssh-key: ${{ secrets.SSH_KEY_FOR_CI }}
+      - name: Download cub for CUDA 10.2
+        run: |
+          CUDA_VERSION=$(cat $CUDA_HOME/version.txt | grep "CUDA Version" | awk '{print $NF}' | cut -d. -f1,2)
+
+          # check if it is CUDA 10.2
+          # download cub
+          if [ "$CUDA_VERSION" = "10.2" ]; then
+            wget https://github.com/NVIDIA/cub/archive/refs/tags/1.8.0.zip
+            unzip 1.8.0.zip
+            cp -r cub-1.8.0/cub/ colossalai/kernel/cuda_native/csrc/kernels/include/
+          fi
       - name: Install Colossal-AI
         run: |
           pip install -r requirements/requirements.txt
diff --git a/.github/workflows/compatiblity_test_on_pr.yml b/.github/workflows/compatiblity_test_on_pr.yml
index 2fca67b82..94a723388 100644
--- a/.github/workflows/compatiblity_test_on_pr.yml
+++ b/.github/workflows/compatiblity_test_on_pr.yml
@@ -3,8 +3,8 @@ name: Compatibility Test on PR
 on:
   pull_request:
     paths:
-      - 'version.txt'
-      - '.compatibility'
+      - "version.txt"
+      - ".compatibility"
 
 jobs:
   matrix_preparation:
@@ -58,6 +58,18 @@ jobs:
      - uses: actions/checkout@v2
        with:
          ssh-key: ${{ secrets.SSH_KEY_FOR_CI }}
+      - name: Download cub for CUDA 10.2
+        run: |
+          CUDA_VERSION=$(cat $CUDA_HOME/version.txt | grep "CUDA Version" | awk '{print $NF}' | cut -d. -f1,2)
+
+          # check if it is CUDA 10.2
+          # download cub
+          if [ "$CUDA_VERSION" = "10.2" ]; then
+            wget https://github.com/NVIDIA/cub/archive/refs/tags/1.8.0.zip
+            unzip 1.8.0.zip
+            cp -r cub-1.8.0/cub/ colossalai/kernel/cuda_native/csrc/kernels/include/
+          fi
+
      - name: Install Colossal-AI
        run: |
          pip install -v --no-cache-dir .
diff --git a/README.md b/README.md
index c33caba90..34c8a6b73 100644
--- a/README.md
+++ b/README.md
@@ -362,6 +362,22 @@ If you want to install and enable CUDA kernel fusion (compulsory installation wh
 CUDA_EXT=1 pip install .
 ```
 
+For users with CUDA 10.2, you can still build ColossalAI from source. However, you need to manually download the cub library and copy it to the corresponding directory.
+
+```bash
+# clone the repository
+git clone https://github.com/hpcaitech/ColossalAI.git
+cd ColossalAI
+
+# download the cub library
+wget https://github.com/NVIDIA/cub/archive/refs/tags/1.8.0.zip
+unzip 1.8.0.zip
+cp -r cub-1.8.0/cub/ colossalai/kernel/cuda_native/csrc/kernels/include/
+
+# install
+CUDA_EXT=1 pip install .
+```
+
 (back to top)
 
 ## Use Docker
diff --git a/docs/source/en/get_started/installation.md b/docs/source/en/get_started/installation.md
index b626edb19..6fc4ce2c9 100644
--- a/docs/source/en/get_started/installation.md
+++ b/docs/source/en/get_started/installation.md
@@ -48,5 +48,20 @@ If you don't want to install and enable CUDA kernel fusion (compulsory installat
 pip install .
 ```
 
+For users with CUDA 10.2, you can still build ColossalAI from source. However, you need to manually download the cub library and copy it to the corresponding directory.
+
+```bash
+# clone the repository
+git clone https://github.com/hpcaitech/ColossalAI.git
+cd ColossalAI
+
+# download the cub library
+wget https://github.com/NVIDIA/cub/archive/refs/tags/1.8.0.zip
+unzip 1.8.0.zip
+cp -r cub-1.8.0/cub/ colossalai/kernel/cuda_native/csrc/kernels/include/
+
+# install
+CUDA_EXT=1 pip install .
+```
diff --git a/docs/source/zh-Hans/get_started/installation.md b/docs/source/zh-Hans/get_started/installation.md
index e0d726c74..a32627db6 100755
--- a/docs/source/zh-Hans/get_started/installation.md
+++ b/docs/source/zh-Hans/get_started/installation.md
@@ -47,4 +47,20 @@ CUDA_EXT=1 pip install .
 pip install .
 ```
 
+如果您在使用CUDA 10.2，您仍然可以从源码安装ColossalAI。但是您需要手动下载cub库并将其复制到相应的目录。
+
+```bash
+# clone the repository
+git clone https://github.com/hpcaitech/ColossalAI.git
+cd ColossalAI
+
+# download the cub library
+wget https://github.com/NVIDIA/cub/archive/refs/tags/1.8.0.zip
+unzip 1.8.0.zip
+cp -r cub-1.8.0/cub/ colossalai/kernel/cuda_native/csrc/kernels/include/
+
+# install
+CUDA_EXT=1 pip install .
+```
+
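
For reference, below is a minimal sketch of the version check that the new "Download cub for CUDA 10.2" workflow steps rely on. It assumes `$CUDA_HOME/version.txt` contains a line such as `CUDA Version 10.2.89` (newer toolkits may ship a `version.json` instead), and the sample string used here is only an illustration of that format. The check matters because CUDA 11 and later bundle cub with the toolkit, so the extra copy is only needed on CUDA 10.2.

```bash
#!/usr/bin/env bash
# Minimal sketch of the CUDA version detection used in the workflow steps above.
# The sample line below is an assumed stand-in for the contents of
# $CUDA_HOME/version.txt on a CUDA 10.2 installation (e.g. "CUDA Version 10.2.89").
sample_line="CUDA Version 10.2.89"

# grep keeps the "CUDA Version" line, awk prints its last field (10.2.89),
# and cut keeps only the major.minor part (10.2).
CUDA_VERSION=$(echo "$sample_line" | grep "CUDA Version" | awk '{print $NF}' | cut -d. -f1,2)
echo "detected: $CUDA_VERSION"   # prints: detected: 10.2

# Only CUDA 10.2 needs the vendored cub headers; CUDA 11+ already bundles cub.
if [ "$CUDA_VERSION" = "10.2" ]; then
    echo "cub 1.8.0 is required: download it and copy cub/ into colossalai/kernel/cuda_native/csrc/kernels/include/"
fi
```

The same guard wraps the download in the workflow steps, so containers built against other CUDA versions skip the extra download entirely.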