mirror of https://github.com/InternLM/InternLM
Update daily_tests.yaml
parent 0fcd87c7ee
commit 9e1881704b
@@ -27,18 +27,19 @@ jobs:
           conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
           source activate internlm-model-latest
           pip install transformers==${{ matrix.transformers-version }}
-          pip install torch==2.2.2 torchvision==0.17.2 --index-url https://download.pytorch.org/whl/cu118
-          wget https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.8/flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
-          pip install flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
-          pip install sentencepiece auto-gptq==0.6.0 lmdeploy[all]
-          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
-          conda deactivate
       - name: load_latest_hf_model
         if: matrix.transformers-version == 'latest'
         run: |
           conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
           source activate internlm-model-latest
           pip install transformers
+      - name: run_test
+        run: |
+          conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
+          source activate internlm-model-latest
+          pip install torch==2.2.2 torchvision==0.17.2 --index-url https://download.pytorch.org/whl/cu118
+          wget https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.8/flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+          pip install flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
           pip install sentencepiece auto-gptq==0.6.0 lmdeploy[all]
           srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
           conda deactivate
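Applied to the workflow, the hunk moves the torch/flash-attention installation and the srun test invocation out of the matrix-versioned install step and into a new run_test step, so load_latest_hf_model now only prepares its environment. For readability, the two affected steps should end up roughly as below; the commands and their order are taken verbatim from the diff, while the YAML nesting and indentation are inferred from the usual GitHub Actions steps layout rather than from the full file:

      # NOTE: nesting/indentation inferred; commands are verbatim from the hunk above
      - name: load_latest_hf_model
        if: matrix.transformers-version == 'latest'
        run: |
          conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
          source activate internlm-model-latest
          pip install transformers
      - name: run_test
        run: |
          conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
          source activate internlm-model-latest
          pip install torch==2.2.2 torchvision==0.17.2 --index-url https://download.pytorch.org/whl/cu118
          wget https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.8/flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
          pip install flash_attn-2.5.8+cu118torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
          pip install sentencepiece auto-gptq==0.6.0 lmdeploy[all]
          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
          conda deactivate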