mirror of https://github.com/THUDM/ChatGLM2-6B
# P-Tuning v2 fine-tuning of ChatGLM2-6B on multi-turn chat data (single node).
PRE_SEQ_LEN=128   # soft-prefix length passed to --pre_seq_len
LR=1e-2           # learning rate
NUM_GPUS=1        # GPUs per node for torchrun

torchrun --standalone --nnodes=1 --nproc-per-node=$NUM_GPUS main.py \
    --do_train \
    --train_file $CHAT_TRAIN_DATA \
    --validation_file $CHAT_VAL_DATA \
    --preprocessing_num_workers 10 \
    --prompt_column prompt \
    --response_column response \
    --history_column history \
    --overwrite_cache \
    --model_name_or_path THUDM/chatglm2-6b \
    --output_dir $CHECKPOINT_NAME \
    --overwrite_output_dir \
    --max_source_length 256 \
    --max_target_length 256 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 16 \
    --predict_with_generate \
    --max_steps 3000 \
    --logging_steps 10 \
    --save_steps 1000 \
    --learning_rate $LR \
    --pre_seq_len $PRE_SEQ_LEN \
    --quantization_bit 4   # quantize base model weights to 4-bit to cut memory use
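The variables CHAT_TRAIN_DATA, CHAT_VAL_DATA, and CHECKPOINT_NAME are left undefined by the script and must be supplied by the caller. A minimal invocation sketch, assuming the file is saved as train_chat.sh; the data paths and output directory below are hypothetical placeholders, not part of the original script:

# Hypothetical paths -- replace with your own chat data and output directory.
CHAT_TRAIN_DATA=data/train.json \
CHAT_VAL_DATA=data/dev.json \
CHECKPOINT_NAME=output/chatglm2-6b-pt-chat \
bash train_chat.sh

Given the --prompt_column, --response_column, and --history_column flags, each line of the train and validation files is expected to be a JSON object with prompt, response, and history fields, where history holds the earlier turns of the conversation as [prompt, response] pairs. A sketch of one record (the text is illustrative, not from the repository):

{"prompt": "How do I reset it?", "response": "Hold the power button for ten seconds.", "history": [["The device froze.", "Try restarting it first."]]}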