mirror of https://github.com/hpcaitech/ColossalAI
#!/bin/bash

# Pick the n GPUs with the least memory currently in use and expose only
# those devices to this script via CUDA_VISIBLE_DEVICES.
set_n_least_used_CUDA_VISIBLE_DEVICES() {
    local n=${1:-"9999"}
    echo "GPU Memory Usage:"
    local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv |
        tail -n +2 |       # drop the CSV header row
        nl -v 0 |          # prefix each row with its GPU index, starting at 0
        tee /dev/tty |     # print the indexed table to the terminal
        sort -g -k 2 |     # sort rows by memory used, ascending
        awk '{print $1}' | # keep only the GPU indices
        head -n $n)        # take the n least-used devices
    export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g')
    echo "Now CUDA_VISIBLE_DEVICES is set to:"
    echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
}

set_n_least_used_CUDA_VISIBLE_DEVICES 8
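
# The call above requests up to 8 devices; on an 8-GPU node it selects all of
# them, ordered least-loaded first. For instance, it might export something
# like CUDA_VISIBLE_DEVICES=3,5,0,1,2,4,6,7 (the ordering shown is
# illustrative, not from the original script).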

# Fill in these paths before launching. The PARENT_* values are concatenated
# directly with the project name below, so they should end with a trailing "/".
PROJECT_NAME=""
PARENT_SAVE_DIR=""
PARENT_TENSORBOARD_DIR=""
PARENT_CONFIG_FILE=""
PRETRAINED_MODEL_PATH=""

declare -a dataset=(
    "PATH TO THE DATASET"
)
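
# The array can hold several data paths; a hypothetical example (these paths
# are placeholders, not part of the original script):
# declare -a dataset=(
#     "/path/to/tokenized/dataset/part-00000"
#     "/path/to/tokenized/dataset/part-00001"
# )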

# Derive timestamped, per-run output paths so repeated runs do not collide.
TIMESTAMP=$(date +%Y-%m-%d-%H-%M-%S)
FULL_PROJECT_NAME="${PROJECT_NAME}-${TIMESTAMP}"
SAVE_DIR="${PARENT_SAVE_DIR}${FULL_PROJECT_NAME}"
TENSORBOARD_DIR="${PARENT_TENSORBOARD_DIR}${FULL_PROJECT_NAME}"
CONFIG_FILE="${PARENT_CONFIG_FILE}${FULL_PROJECT_NAME}.json"
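
# For example, with hypothetical values PROJECT_NAME="llama-pretrain" and
# PARENT_SAVE_DIR="/data/checkpoints/", SAVE_DIR would expand to something
# like /data/checkpoints/llama-pretrain-2024-01-01-12-00-00.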

# Launch distributed training with 8 processes per node on the nodes listed
# in ./hostfile, using the ZeRO-2 plugin, bf16 mixed precision, gradient
# checkpointing, and FlashAttention.
colossalai run --nproc_per_node 8 --hostfile hostfile --master_port 30013 train.py \
    --pretrained "$PRETRAINED_MODEL_PATH" \
    --dataset "${dataset[@]}" \
    --plugin "zero2" \
    --save_interval 400 \
    --save_dir "$SAVE_DIR" \
    --tensorboard_dir "$TENSORBOARD_DIR" \
    --config_file "$CONFIG_FILE" \
    --num_epochs 1 \
    --micro_batch_size 8 \
    --lr 1e-4 \
    --mixed_precision "bf16" \
    --grad_clip 1.0 \
    --weight_decay 0.01 \
    --warmup_steps 100 \
    --use_grad_checkpoint \
    --use_flash_attn \
    --pad_token "unk"
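
# Note: on a single machine, a hostfile is not required; a minimal local
# launch (a sketch, assuming one 8-GPU node, with the same train.py flags
# as above) would look like:
#   colossalai run --nproc_per_node 8 --master_port 30013 train.py ...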