add demo test (#132)

Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
kkscilife 2023-07-25 19:51:50 +08:00 committed by GitHub
parent 26205c1edf
commit 03851ea2fa
10 changed files with 342 additions and 0 deletions

.github/workflows/demo_in_readme.yaml vendored Normal file

@@ -0,0 +1,68 @@
name: demo-in-readme
on:
  pull_request:
    branches:
      - "main"
      - "develop"
    paths-ignore:
      - "docs/**"
      - "**.md"

jobs:
  dataset-preparation:
    runs-on: [lmtest]
    steps:
      - uses: actions/checkout@v3

      - name: raw-chinese-data
        run: |
          source activate internlm-env-test
          sh ./ci_scripts/data/tokenizer_chinese.sh

      - name: alpaca-data
        run: |
          source activate internlm-env-test
          sh ./ci_scripts/data/tokenizer_alpaca.sh

  train:
    runs-on: [lmtest]
    steps:
      - uses: actions/checkout@v3

      - name: slurm-train
        run: |
          source activate internlm-env-test
          sh ./ci_scripts/train/slurm_train.sh
          rm -rf $GITHUB_WORKSPACE/llm_ckpts

      - name: torchrun-train
        run: |
          source activate internlm-env-test
          sh ./ci_scripts/train/torchrun.sh
          rm -rf $GITHUB_WORKSPACE/llm_ckpts

  convert-model-then-load:
    runs-on: [lmtest]
    steps:
      - uses: actions/checkout@v3

      - name: convert-model-then-load
        run: |
          source activate internlm-env-test
          export PYTHONPATH=$PWD:$PYTHONPATH
          sh ./ci_scripts/model/convert_to_hf.sh
          cd ./hf_ckpt
          srun -p llm2 python ../ci_scripts/model/loaded_as_transformer.py
          cd ..
          rm -rf $GITHUB_WORKSPACE/hf_ckpt

  load-chat-model-in-hf:
    runs-on: [lmtest]
    steps:
      - uses: actions/checkout@v3

      - name: chat-model-in-hf
        run: |
          source activate internlm-env-test
          srun -p llm2 python ./ci_scripts/model/demo_load_7B_chat_model.py
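
For reference, the jobs above can be reproduced by hand on a runner. A minimal sketch, assuming the internlm-env-test conda environment, the llm2 slurm partition, and the petrelfs data paths referenced by the scripts are all reachable:

    # manual reproduction of the dataset-preparation and train jobs (sketch)
    export GITHUB_WORKSPACE=$PWD   # outside of GitHub Actions this variable is not set
    source activate internlm-env-test
    sh ./ci_scripts/data/tokenizer_chinese.sh
    sh ./ci_scripts/data/tokenizer_alpaca.sh
    sh ./ci_scripts/train/slurm_train.sh
    rm -rf $GITHUB_WORKSPACE/llm_ckpts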

ci_scripts/common/basic_func.sh

@@ -0,0 +1,14 @@
#!/bin/bash

export exit_code=0

# check that the file named by the caller-scoped $file_path exists; a failing ls adds to exit_code
function if_exist() {
    ls -l $file_path
    exit_code_now=$?
    exit_code=$(($exit_code + $exit_code_now))
}

# count the entries matching the caller-scoped $file_dir and report the number
function num_files() {
    file_num=$(ls -l $file_dir | wc -l)
    echo "there are $file_num files in $file_dir"
}
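
For context, these helpers read their inputs through the caller's variables ($file_path and $file_dir) and accumulate failures into the exported exit_code, which the calling script checks at the end. A minimal usage sketch in the same style as the tokenizer scripts below (the /tmp paths are hypothetical, for illustration only):

    source ./ci_scripts/common/basic_func.sh
    file_list=("/tmp/dataset.bin" "/tmp/dataset.bin.meta")   # hypothetical paths
    for file_path in ${file_list[@]}; do
        if_exist $file_path          # a non-zero ls exit code is added to exit_code
    done
    file_dir="/tmp/*.bin"
    num_files ${file_dir}            # prints how many entries match
    if [ $exit_code -ne 0 ]; then
        exit 1
    fi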

ci_scripts/data/tokenizer_alpaca.sh

@@ -0,0 +1,22 @@
#!/bin/bash

rm -rf /mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/*
python tools/alpaca_tokenizer.py /mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/alpaca_data.json /mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result tools/V7_sft.model --split_ratio 0.1

file_one="/mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/train/en/dataset.bin"
file_two="/mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/train/en/dataset.bin.meta"
file_three="/mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/valid/en/dataset.bin"
file_four="/mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/valid/en/dataset.bin.meta"
file_list=($file_one $file_two $file_three $file_four)

source ./ci_scripts/common/basic_func.sh
for file_path in ${file_list[@]};
do
    if_exist $file_path
done

if [ $exit_code -ne 0 ]
then
    exit 1
fi

ci_scripts/data/tokenizer_chinese.sh

@@ -0,0 +1,19 @@
#!/bin/bash

rm -rf /mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/result.*
srun -p llm2 python tools/tokenizer.py --text_input_path /mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/raw_data.txt --bin_output_path /mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/result.bin

file_one="/mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/result.bin"
file_two="/mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/result.bin.meta"
file_list=($file_one $file_two)

source ./ci_scripts/common/basic_func.sh
for file_path in ${file_list[@]};
do
    if_exist $file_path
done

if [ $exit_code -ne 0 ]
then
    exit 1
fi

ci_scripts/model/convert_to_hf.sh

@@ -0,0 +1,33 @@
#!/bin/bash

rm -rf ./hf_ckpt/*
python ./tools/transformers/convert2hf.py --src_folder /mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/llm_ckpts/20 --tgt_folder hf_ckpt/ --tokenizer ./tools/V7_sft.model

# assert that the expected model files exist
file_one="$GITHUB_WORKSPACE/hf_ckpt/tokenizer.model"
file_two="$GITHUB_WORKSPACE/hf_ckpt/config.json"
file_three="$GITHUB_WORKSPACE/hf_ckpt/modeling_internlm.py"
file_list=($file_one $file_two $file_three)
file_dir="$GITHUB_WORKSPACE/hf_ckpt/*"

source ./ci_scripts/common/basic_func.sh
for file_path in ${file_list[@]};
do
    if_exist $file_path
done

num_files ${file_dir}
if [ $file_num -ne 9 ]
then
    echo "The number of files is not right."
    ls -l $file_dir
    exit_code=$(($exit_code + 1))
fi

if [ $exit_code -ne 0 ]
then
    exit 1
fi

ci_scripts/model/demo_load_7B_chat_model.py

@@ -0,0 +1,12 @@
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).cuda()
model = model.eval()

response, history = model.chat(tokenizer, "你好", history=[])  # "Hello"
print(response)
assert len(response) != 0

response, history = model.chat(tokenizer, "请提供三个管理时间的建议。", history=history)  # "Please give three suggestions for managing time."
print(response)
assert len(response) != 0

ci_scripts/model/loaded_as_transformer.py

@@ -0,0 +1,7 @@
from transformers import AutoModel
model = AutoModel.from_pretrained("../hf_ckpt/", trust_remote_code=True).cuda()
print(model)
assert model.config.hidden_size == 2048
assert model.config.num_attention_heads == 16
assert model.config.num_hidden_layers == 16

ci_scripts/train/ci_7B_sft.py

@@ -0,0 +1,130 @@
JOB_NAME = "7b_train"
SEQ_LEN = 1024
HIDDEN_SIZE = 2048
NUM_ATTENTION_HEAD = 16
MLP_RATIO = 8 / 3
NUM_LAYER = 16
VOCAB_SIZE = 103168
# Ckpt folder format:
# fs: 'local:/mnt/nfs/XXX'
# oss: 'boto3:s3://model_weights/XXX'
MODEL_ONLY_FOLDER = "local:llm_ckpts/xxxx"
#SAVE_CKPT_FOLDER = "local:llm_ckpts"
SAVE_CKPT_FOLDER = "local:llm_ckpts"
#LOAD_CKPT_FOLDER = "local:llm_ckpts/49"
ckpt = dict(
    # Path to save training ckpt.
    save_ckpt_folder=SAVE_CKPT_FOLDER,
    # Path to continue training ckpt (load model weights and scheduler/context states).
    # load_ckpt_folder=LOAD_CKPT_FOLDER,
    # Path to initialize with given model weights.
    # load_model_only_folder=MODEL_ONLY_FOLDER,
    checkpoint_every=20,
    # Whether to load optimizer states when continuing training.
    load_optimizer=True,
)

TRAIN_FOLDER = "/mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/train/en"
data = dict(
    seq_len=SEQ_LEN,
    # micro_num is the number of micro-batches contained in one gradient update
    micro_num=4,
    # packed_length = micro_bsz * SEQ_LEN
    micro_bsz=2,
    pack_sample_into_one=False,
    total_steps=20,
    skip_batches="",
    rampup_batch_size="",
    # Datasets with fewer than 50 rows will be discarded
    min_length=50,
    # train_folder=TRAIN_FOLDER,
)
grad_scaler = dict(
    fp16=dict(
        # the initial loss scale, defaults to 2**16
        initial_scale=2**16,
        # the minimum loss scale, defaults to None
        min_scale=1,
        # the number of steps to increase loss scale when no overflow occurs
        growth_interval=1000,
    ),
    # the multiplication factor for increasing loss scale, defaults to 2
    growth_factor=2,
    # the multiplication factor for decreasing loss scale, defaults to 0.5
    backoff_factor=0.5,
    # the maximum loss scale, defaults to None
    max_scale=2**24,
    # the number of overflows before decreasing loss scale, defaults to 2
    hysteresis=2,
)

hybrid_zero_optimizer = dict(
    # Enable low_level_optimizer overlap_communication
    zero_overlap_communication=True,
    # bucket size for nccl communication params
    reduce_bucket_size=512 * 1024 * 1024,
    # grad clipping
    clip_grad_norm=1.0,
)

loss = dict(
    label_smoothing=0,
)
adam = dict(
    lr=1e-4,
    adam_beta1=0.9,
    adam_beta2=0.95,
    adam_beta2_c=0,
    adam_eps=1e-8,
    weight_decay=0.01,
)

lr_scheduler = dict(
    total_steps=data["total_steps"],
    init_steps=0,  # optimizer_warmup_step
    warmup_ratio=0.01,
    eta_min=1e-5,
    last_epoch=-1,
)

beta2_scheduler = dict(
    init_beta2=adam["adam_beta2"],
    c=adam["adam_beta2_c"],
    cur_iter=-1,
)

model = dict(
    checkpoint=False,
    num_attention_heads=NUM_ATTENTION_HEAD,
    embed_split_hidden=True,
    vocab_size=VOCAB_SIZE,
    embed_grad_scale=1,
    parallel_output=True,
    hidden_size=HIDDEN_SIZE,
    num_layers=NUM_LAYER,
    mlp_ratio=MLP_RATIO,
    apply_post_layer_norm=False,
    dtype="torch.bfloat16",
    norm_type="rmsnorm",
    layer_norm_epsilon=1e-5,
)
"""
zero1 parallel:
1. if zero1 <= 0, The size of the zero process group is equal to the size of the dp process group,
so parameters will be divided within the range of dp.
2. if zero1 == 1, zero is not used, and all dp groups retain the full amount of model parameters.
3. zero1 > 1 and zero1 <= dp world size, the world size of zero is a subset of dp world size.
For smaller models, it is usually a better choice to split the parameters within nodes with a setting <= 8.
pipeline parallel: pipeline parallel size, only 1 is accepted currently.
tensor parallel: tensor parallel size, usually the number of GPUs per node, only 1 is accepted currently.
"""
parallel = dict(
zero1=8,
)
cudnn_deterministic = False
cudnn_benchmark = False
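
Since this config is plain Python that train.py loads via --config, a quick local syntax check can catch errors before a slurm job is submitted. A minimal sketch, assuming a Python 3 interpreter on PATH (py_compile only checks that the file parses, not that the values are valid):

    python -m py_compile ./ci_scripts/train/ci_7B_sft.py && echo "config parses"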

ci_scripts/train/slurm_train.sh

@@ -0,0 +1,20 @@
#!/bin/bash

rm -rf $GITHUB_WORKSPACE/llm_ckpts/20
srun -p llm2 --quotatype=spot -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ./ci_scripts/train/ci_7B_sft.py

file_dir="$GITHUB_WORKSPACE/llm_ckpts/20/*.pt"
source ./ci_scripts/common/basic_func.sh
num_files ${file_dir}

if [ $file_num -ne 21 ]
then
    echo "The number of files is not right."
    ls -l $file_dir
    rm -rf $GITHUB_WORKSPACE/llm_ckpts
    exit 1
fi

ci_scripts/train/torchrun.sh

@@ -0,0 +1,17 @@
#!/bin/bash

rm -rf $GITHUB_WORKSPACE/llm_ckpts/20
srun -p llm2 -N 1 torchrun --nnodes=1 --nproc_per_node=8 --master_port=29501 train.py --config ./ci_scripts/train/ci_7B_sft.py --launcher "torch"

file_dir="$GITHUB_WORKSPACE/llm_ckpts/20/*.pt"
source ./ci_scripts/common/basic_func.sh
num_files ${file_dir}

if [ $file_num -ne 21 ]
then
    echo "The number of files is not right."
    ls -l $file_dir
    rm -rf $GITHUB_WORKSPACE/llm_ckpts
    exit 1
fi