#!/bin/bash
# Mirror of https://github.com/hpcaitech/ColossalAI
# Pick the $1 GPUs with the least memory currently in use and expose only
# those to CUDA. With no argument, n defaults to 9999, i.e. keep every GPU.
set_n_least_used_CUDA_VISIBLE_DEVICES() {
    local n=${1:-"9999"}
    echo "GPU Memory Usage:"
    # Query per-GPU memory usage, drop the CSV header, prefix each row with a
    # 0-based GPU index, echo the table to the terminal, sort ascending by
    # memory used, and keep the indices of the $n least-used GPUs.
    local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv \
        | tail -n +2 \
        | nl -v 0 \
        | tee /dev/tty \
        | sort -g -k 2 \
        | awk '{print $1}' \
        | head -n $n)
    # Join the selected indices with commas, e.g. "0 3" -> "0,3".
    export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g')
    echo "Now CUDA_VISIBLE_DEVICES is set to:"
    echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
}
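
# Usage note (illustrative, not part of the original script): the argument is
# the number of GPUs to expose. To run on four GPUs instead, pair
#   set_n_least_used_CUDA_VISIBLE_DEVICES 4
# with torchrun --nproc_per_node=4 so the process count matches the visible GPUs.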

# Expose the 2 least-used GPUs, then launch one training process per GPU.
set_n_least_used_CUDA_VISIBLE_DEVICES 2

# Train a reward model on the Anthropic/hh-rlhf preference dataset with the
# pairwise log-sigmoid ranking loss and ColossalAI's ZeRO-2 strategy.
torchrun --standalone --nproc_per_node=2 train_reward_model.py \
    --pretrain <your pretrain path> \
    --model 'bloom' \
    --strategy colossalai_zero2 \
    --loss_fn 'log_sig' \
    --save_path <your model saving path> \
    --dataset 'Anthropic/hh-rlhf'
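
# Example (hypothetical values, given --model 'bloom'): a concrete invocation
# might pass a Hugging Face BLOOM checkpoint and a local output directory, e.g.
#   --pretrain 'bigscience/bloom-560m' --save_path './rm_checkpoint'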