diff --git a/.github/workflows/daily_tests.yaml b/.github/workflows/daily_tests.yaml
index 7b83ce0..4091746 100644
--- a/.github/workflows/daily_tests.yaml
+++ b/.github/workflows/daily_tests.yaml
@@ -27,18 +27,19 @@ jobs:
           conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
           source activate internlm-model-latest
           pip install transformers==${{ matrix.transformers-version }}
-          pip install torch==2.2.2 torchvision==0.17.2 --index-url https://download.pytorch.org/whl/cu118
-          wget https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.8/flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
-          pip install flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
-          pip install sentencepiece auto-gptq==0.6.0 lmdeploy[all]
-          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
-          conda deactivate
       - name: load_latest_hf_model
         if: matrix.transformers-version == 'latest'
         run: |
           conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
           source activate internlm-model-latest
           pip install transformers
+      - name: run_test
+        run: |
+          conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
+          source activate internlm-model-latest
+          pip install torch==2.2.2 torchvision==0.17.2 --index-url https://download.pytorch.org/whl/cu118
+          wget https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.8/flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+          pip install flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
           pip install sentencepiece auto-gptq==0.6.0 lmdeploy[all]
           srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
           conda deactivate
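
For readability, a sketch of how the two affected steps would read after this patch is applied. The indentation and the surrounding steps list are assumed from the hunk context, not shown in full in the diff:

      - name: load_latest_hf_model
        if: matrix.transformers-version == 'latest'
        run: |
          conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
          source activate internlm-model-latest
          pip install transformers
      - name: run_test
        run: |
          conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
          source activate internlm-model-latest
          pip install torch==2.2.2 torchvision==0.17.2 --index-url https://download.pytorch.org/whl/cu118
          wget https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.8/flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
          pip install flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
          pip install sentencepiece auto-gptq==0.6.0 lmdeploy[all]
          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
          conda deactivate

Note that, as written in the diff, the new run_test step has no if: condition, so it runs for every transformers-version in the matrix, while load_latest_hf_model remains gated on 'latest'.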