mirror of https://github.com/InternLM/InternLM
Update daily_tests.yaml
parent a8fd7db00b
commit 91ab284dfd
@@ -13,7 +13,7 @@ jobs:
     runs-on: [t_cluster]
     strategy:
       matrix:
-        transformers-version: [4.34.0, latest]
+        transformers-version: [4.36.0, latest]
     steps:
     - name: mask env
       run: |
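For context, a minimal sketch of the workflow section this hunk touches, assuming the matrix entry is consumed by a conditional pip install; the job name, step layout, and the handling of the latest entry are assumptions, not shown in this diff:

jobs:
  HF_model:                     # job name assumed; only the hunk above is from the commit
    runs-on: [t_cluster]
    strategy:
      matrix:
        transformers-version: [4.36.0, latest]
    steps:
      - name: install transformers
        run: |
          # pin the matrixed version; "latest" falls through to a plain upgrade
          if [ "${{ matrix.transformers-version }}" != "latest" ]; then
            pip install transformers==${{ matrix.transformers-version }}
          else
            pip install -U transformers
          fi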
@@ -37,8 +37,7 @@ jobs:
       run: |
         source activate internlm-model-latest
         pip install torch==2.2.2 torchvision==0.17.2 --index-url https://download.pytorch.org/whl/cu118
-        wget https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.8/flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
-        pip install flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+        pip install /mnt/petrelfs/qa-caif-cicd/resource/flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
         pip install sentencepiece auto-gptq==0.6.0 lmdeploy[all]
         srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
         conda deactivate
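The second hunk replaces the wget download of the flash-attn wheel with a wheel pre-staged on the cluster filesystem, removing an external network dependency from the CI run; the wheel filename (cu118, torch2.2, cp310) matches the pinned torch==2.2.2 cu118 install above. A hypothetical hardening of that step, not part of this commit, would fall back to the GitHub release when the pre-staged wheel is absent:

      - name: install flash-attn
        run: |
          # hypothetical fallback, not part of this commit: prefer the
          # pre-staged wheel, otherwise fetch it from the GitHub release
          WHEEL=flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
          if [ -f /mnt/petrelfs/qa-caif-cicd/resource/${WHEEL} ]; then
            pip install /mnt/petrelfs/qa-caif-cicd/resource/${WHEEL}
          else
            wget https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.8/${WHEEL}
            pip install ./${WHEEL}
          fi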