mirror of https://github.com/InternLM/InternLM
test(workflow): add ci workflow for acc test (#485)
* add ci workflow for acc test
* change train script
* add --kill-on-bad-exit=1 and change always to !cancelled

---------

Co-authored-by: wangmengke <wangmengke@pjlab.org.cn>
parent 626ed0fc5e
commit 2b984ffa58
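The two edits repeated throughout this commit follow the pattern sketched below (a minimal sketch; the job name, step name, and script are illustrative placeholders, not taken from the workflows). `!cancelled()` lets a job run after an upstream failure but skips it when the workflow run is cancelled, whereas `always()` would run it even then; `--kill-on-bad-exit=1` makes srun terminate every task of a step as soon as any task exits non-zero, so a broken step fails fast instead of hanging.

    example-job:                       # hypothetical job name
      if: ${{ !cancelled() }}          # was: ${{ always() }}
      needs: check-requirements
      runs-on: [t_cluster]
      steps:
        - name: example-step           # hypothetical step name
          run: |
            # --kill-on-bad-exit=1: abort all tasks once one exits with a non-zero status
            srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --quotatype=spot --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=1 python example_script.py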
@@ -35,7 +35,7 @@ jobs:
           fi
 
   dataset-preparation:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
     needs: check-requirements
     runs-on: [t_cluster]
     steps:

@@ -55,7 +55,7 @@ jobs:
           sh ./ci_scripts/data/tokenizer_alpaca.sh
 
   train:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
     needs: check-requirements
     runs-on: [t_cluster]
     timeout-minutes: 30

@@ -92,7 +92,7 @@ jobs:
           rsync -av --remove-source-files $GITHUB_WORKSPACE/llm_ckpts ${{env.WORKSPACE_PREFIX}}/ci_clean_bak
 
   convert-model-then-load:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
     needs: check-requirements
     runs-on: [t_cluster]
     timeout-minutes: 15

@@ -108,11 +108,11 @@ jobs:
           export PYTHONPATH=$PWD:$PYTHONPATH
           sh ./ci_scripts/model/convert_to_hf.sh
           cd ./hf_ckpt
-          srun -p ${SLURM_PARTITION} --quotatype=spot --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ../ci_scripts/model/loaded_as_transformer.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --quotatype=spot --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ../ci_scripts/model/loaded_as_transformer.py
           cd ..
           rsync -av --remove-source-files $GITHUB_WORKSPACE/hf_ckpt ${{env.WORKSPACE_PREFIX}}/ci_clean_bak
   load-chat-model-in-hf:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
     needs: check-requirements
     runs-on: [t_cluster]
     timeout-minutes: 15

@@ -125,4 +125,4 @@ jobs:
       - name: chat-model-in-hf
         run: |
           source activate internlm-env-test
-          srun -p ${SLURM_PARTITION} --quotatype=spot --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ./ci_scripts/model/demo_load_7B_chat_model.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --quotatype=spot --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ./ci_scripts/model/demo_load_7B_chat_model.py

@@ -19,4 +19,4 @@ jobs:
       - name: training_8GPU
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training

@@ -0,0 +1,52 @@
+name: pr-merged
+on:
+  push:
+    branches:
+      - "develop"
+      - "main"
+    paths-ignore:
+      - "cmds/**"
+      - "**.md"
+env:
+  WORKSPACE_PREFIX: $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-4)
+  SLURM_PARTITION: llm_s
+
+jobs:
+  check-requirements:
+    runs-on: [t_cluster]
+    steps:
+      - name: mask env
+        run: |
+          echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+      - name: check-requirements
+        run: |
+          changed_files=$(git diff --name-only -r HEAD^1 HEAD)
+          echo $changed_files
+          if [[ $changed_files =~ "runtime.txt" ]]; then
+            pip install -r requirements/runtime.txt
+          fi
+
+          if [[ $changed_files =~ "torch.txt" ]]; then
+            pip install -r requirements/torch.txt
+          fi
+
+
+  acc_tests:
+    if: ${{ !cancelled() }}
+    needs: check-requirements
+    runs-on: [t_cluster]
+    timeout-minutes: 30
+    steps:
+      - name: mask env
+        run: |
+          echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+      - uses: actions/checkout@v3
+
+      - name: acc_tests
+        run: |
+          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
+          export PYTHONPATH=$PWD:$PYTHONPATH
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=internlm-acc-test-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python ./tests/test_training/train_CI.py --config ./tests/test_training/7B_check_acc.py

@@ -42,7 +42,7 @@ jobs:
 
 
   unit_tests_core_pipeline:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
     needs: check-requirements
     runs-on: [t_cluster]
     timeout-minutes: 20

@@ -56,10 +56,10 @@ jobs:
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
           export PYTHONPATH=$PWD:$PYTHONPATH
-          srun -p ${SLURM_PARTITION} --job-name=internlm-ut-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s -v ./tests/test_core/test_pipeline.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=internlm-ut-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s -v ./tests/test_core/test_pipeline.py
 
   unit_tests_utils_storage_manager:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
     needs: check-requirements
     runs-on: [t_cluster]
     timeout-minutes: 20

@@ -73,4 +73,4 @@ jobs:
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
           export PYTHONPATH=$PWD:$PYTHONPATH
-          srun -p ${SLURM_PARTITION} --job-name=internlm-ut-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s -v ./tests/test_utils/test_storage_manager.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=internlm-ut-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s -v ./tests/test_utils/test_storage_manager.py

@@ -17,7 +17,7 @@ jobs:
       - name: training_8GPU
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training
 
   training_16GPU_8DP2TP:
     runs-on: [t_cluster]

@@ -29,7 +29,7 @@ jobs:
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
           sed -i 's/^.*tensor=.*/ tensor=2,/' ./configs/7B_sft.py
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TP" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TP" ./tests/test_training
 
   training_16GPU_8DP2TPSP:
     runs-on: [t_cluster]

@@ -42,7 +42,7 @@ jobs:
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
           sed -i 's/^.*tensor=.*/ tensor=2,/' ./configs/7B_sft.py
           sed -i 's/^.*sequence_parallel=.*/ sequence_parallel=True,/' ./configs/7B_sft.py
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TPSP" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TPSP" ./tests/test_training
 
   training_16GPU_8DP2PP:
     runs-on: [t_cluster]

@@ -54,7 +54,7 @@ jobs:
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
           sed -i 's/^.*pipeline=.*/ pipeline=dict(size=2),/' ./configs/7B_sft.py
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP" ./tests/test_training
 
   training_16GPU_8DP2PP_InterleavedOverlap:
     runs-on: [t_cluster]

@@ -67,7 +67,7 @@ jobs:
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
           sed -i 's/^.*pipeline=.*/ pipeline=dict(size=2, interleaved_overlap=True),/' ./configs/7B_sft.py
           sed -i 's/^.*num_chunks=.*/ num_chunks=2,/' ./configs/7B_sft.py
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP_InterleavedOverlap" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP_InterleavedOverlap" ./tests/test_training
 
   unit_test_optimizer:
     runs-on: [t_cluster]

@@ -78,7 +78,7 @@ jobs:
       - name: test_optimizer
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_solver/test_optimizer.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_solver/test_optimizer.py
 
   unit_test_model:
     runs-on: [t_cluster]

@@ -89,14 +89,14 @@ jobs:
       - name: test_embedding_accuracy
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_embedding.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_embedding.py
 
       - name: test_model_internlm_accuracy
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_model_internlm.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_model_internlm.py
 
       - name: test_norm_accuracy
         run: |
           source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_norm.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_norm.py

@@ -23,7 +23,7 @@ if [[ ${num} -gt 0 ]]; then
     fi
 fi
 
-srun -p ${SLURM_PARTITION} --quotatype=spot --job-name=$1 --gpus-per-task=1 python tools/tokenizer.py --text_input_path ${DATA} --bin_output_path ${RESULT}
+srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --quotatype=spot --job-name=$1 --gpus-per-task=1 python tools/tokenizer.py --text_input_path ${DATA} --bin_output_path ${RESULT}
 [[ $? -ne 0 ]] && { echo "test tokenizer.py failed."; exit_code=$(($exit_code + 1)); }
 
 file_list=($RESULT $RESULT_META)

@@ -22,7 +22,7 @@ if [[ ! -f ${file} ]]; then
     exit_code=$(($exit_code + 1))
 fi
 
-srun -p ${SLURM_PARTITION} --exclusive --quotatype=spot --job-name=$2 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ${file}
+srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --exclusive --quotatype=spot --job-name=$2 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ${file}
 [[ $? -ne 0 ]] && { echo "test slurm training failed."; exit_code=$(($exit_code + 1)); }
 
 

@@ -22,7 +22,7 @@ if [[ -d ${CKPTS20_PATH} ]]; then
     fi
 fi
 
-srun -p ${SLURM_PARTITION} --exclusive --quotatype=spot --job-name=$1 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ./ci_scripts/train/ci_7B_sft.py
+srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --exclusive --quotatype=spot --job-name=$1 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ./ci_scripts/train/ci_7B_sft.py
 [[ $? -ne 0 ]] && { echo "test slurm training failed."; exit_code=$(($exit_code + 1)); }
 
 num=$(num_files "${CKPTS20_OUTPUT}")

@@ -22,7 +22,7 @@ if [[ -d ${CKPTS20_PATH} ]]; then
     fi
 fi
 
-srun -p ${SLURM_PARTITION} --exclusive --quotatype=spot --job-name=$1 -N 1 torchrun --nnodes=1 --nproc_per_node=8 --master_port=29501 train.py --config ./ci_scripts/train/ci_7B_sft.py --launcher torch
+srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --exclusive --quotatype=spot --job-name=$1 -N 1 torchrun --nnodes=1 --nproc_per_node=8 --master_port=29501 train.py --config ./ci_scripts/train/ci_7B_sft.py --launcher torch
 [[ $? -ne 0 ]] && { echo "test torch training failed."; exit_code=$(($exit_code + 1)); }
 
 num=$(num_files "${CKPTS_OUTPUT}")