mirror of https://github.com/InternLM/InternLM
Merge branch 'feature_add_moe' of github.com:blankde/InternLM into feature_add_moe_pp_zl
Conflicts: train.pypull/182/head
commit
7b1709a7ff
|
|
@ -7,44 +7,84 @@ on:
|
|||
paths-ignore:
|
||||
- "docs/**"
|
||||
- "**.md"
|
||||
env:
|
||||
WORKSPACE_PREFIX: $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-4)
|
||||
SLURM_PARTITION: llm
|
||||
|
||||
jobs:
|
||||
dataset-preparation:
|
||||
check-requirements:
|
||||
runs-on: [lmtest]
|
||||
steps:
|
||||
- name: mask env
|
||||
run: |
|
||||
echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- name: check-requirements
|
||||
run: |
|
||||
source activate internlm-env-test
|
||||
changed_files=$(git diff --name-only -r HEAD^1 HEAD)
|
||||
echo $changed_files
|
||||
if [[ $changed_files =~ "runtime.txt" ]]; then
|
||||
pip install -r requirements/runtime.txt
|
||||
fi
|
||||
|
||||
if [[ $changed_files =~ "torch.txt" ]]; then
|
||||
pip install -r requirements/torch.txt
|
||||
fi
|
||||
|
||||
dataset-preparation:
|
||||
if: ${{ always() }}
|
||||
needs: check-requirements
|
||||
runs-on: [lmtest]
|
||||
steps:
|
||||
- name: mask env
|
||||
run: |
|
||||
echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: raw-chinese-data
|
||||
run: |
|
||||
source activate internlm-env-test
|
||||
sh ./ci_scripts/data/tokenizer_chinese.sh
|
||||
sh ./ci_scripts/data/tokenizer_chinese.sh ${GITHUB_RUN_ID}-${GITHUB_JOB}
|
||||
|
||||
- name: alpaca-data
|
||||
run: |
|
||||
source activate internlm-env-test
|
||||
sh ./ci_scripts/data/tokenizer_alpaca.sh
|
||||
|
||||
|
||||
train:
|
||||
if: ${{ always() }}
|
||||
needs: check-requirements
|
||||
runs-on: [lmtest]
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: mask env
|
||||
run: |
|
||||
echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: slurm-train
|
||||
run: |
|
||||
source activate internlm-env-test
|
||||
sh ./ci_scripts/train/slurm_train.sh
|
||||
sh ./ci_scripts/train/slurm_train.sh ${GITHUB_RUN_ID}-${GITHUB_JOB}
|
||||
rm -rf $GITHUB_WORKSPACE/llm_ckpts
|
||||
|
||||
- name: torchrun-train
|
||||
run: |
|
||||
source activate internlm-env-test
|
||||
sh ./ci_scripts/train/torchrun.sh
|
||||
sh ./ci_scripts/train/torchrun.sh ${GITHUB_RUN_ID}-${GITHUB_JOB}
|
||||
rm -rf $GITHUB_WORKSPACE/llm_ckpts
|
||||
|
||||
convert-model-then-load:
|
||||
if: ${{ always() }}
|
||||
needs: check-requirements
|
||||
runs-on: [lmtest]
|
||||
steps:
|
||||
- name: mask env
|
||||
run: |
|
||||
echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: convert-model-then-load
|
||||
|
|
@ -53,16 +93,21 @@ jobs:
|
|||
export PYTHONPATH=$PWD:$PYTHONPATH
|
||||
sh ./ci_scripts/model/convert_to_hf.sh
|
||||
cd ./hf_ckpt
|
||||
srun -p llm2 python ../ci_scripts/model/loaded_as_transformer.py
|
||||
srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ../ci_scripts/model/loaded_as_transformer.py
|
||||
cd ..
|
||||
rm -rf $GITHUB_WORKSPACE/hf_ckpt
|
||||
|
||||
load-chat-model-in-hf:
|
||||
if: ${{ always() }}
|
||||
needs: check-requirements
|
||||
runs-on: [lmtest]
|
||||
steps:
|
||||
- name: mask env
|
||||
run: |
|
||||
echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: chat-model-in-hf
|
||||
run: |
|
||||
source activate internlm-env-test
|
||||
srun -p llm2 python ./ci_scripts/model/demo_load_7B_chat_model.py
|
||||
srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ./ci_scripts/model/demo_load_7B_chat_model.py
|
||||
|
|
|
|||
|
|
@ -1,12 +1,16 @@
|
|||
name: lint-check
|
||||
|
||||
on: [push]
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
branches:
|
||||
- "main"
|
||||
- "develop"
|
||||
|
||||
jobs:
|
||||
# lint check can be auto-executed by the workflow
|
||||
lint-check:
|
||||
runs-on: [internlm]
|
||||
if: github.repository == 'InternLM/InternLM'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,32 @@
|
|||
name: 'Close stale issues and PRs'
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# check issue and pull request once at 01:30 a.m. every day
|
||||
- cron: '30 1 * * *'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v7
|
||||
with:
|
||||
stale-issue-message: 'This issue is marked as stale because it has been marked as invalid or awaiting response for 7 days without any further response. It will be closed in 7 days if the stale label is not removed or if there is no further response.'
|
||||
stale-pr-message: 'This PR is marked as stale because there has been no activity in the past 7 days. It will be closed in 7 days if the stale label is not removed or if there is no further updates.'
|
||||
close-issue-message: 'This issue is closed because it has been stale for 7 days. Please open a new issue if you have similar issues or you have any new updates now.'
|
||||
close-pr-message: 'This PR is closed because it has been stale for 7 days. Please reopen this PR if you have any updates and want to keep contributing the code.'
|
||||
# Labels on issues exempted from stale
|
||||
exempt-issue-labels: "enhancement,planned"
|
||||
days-before-issue-stale: 7
|
||||
days-before-pr-stale: 7
|
||||
days-before-issue-close: 7
|
||||
days-before-pr-close: 7
|
||||
# automatically remove the stale label when the issues or the pull requests are updated or commented
|
||||
remove-stale-when-updated: true
|
||||
operations-per-run: 50
|
||||
|
|
@ -115,6 +115,7 @@ venv.bak/
|
|||
*.pkl
|
||||
*.pkl.json
|
||||
*.log.json
|
||||
*.trace.json
|
||||
docs/modelzoo_statistics.md
|
||||
mmdet/.mim
|
||||
work_dirs/
|
||||
|
|
@ -143,3 +144,4 @@ core.*
|
|||
# Run
|
||||
llm_ckpts
|
||||
events.*
|
||||
memory_trace
|
||||
|
|
|
|||
|
|
@ -0,0 +1,14 @@
|
|||
assign:
|
||||
strategy:
|
||||
# random
|
||||
daily-shift-based
|
||||
schedule:
|
||||
'*/1 * * * *'
|
||||
assignees:
|
||||
- yhcc
|
||||
- yhcc
|
||||
- sunpengsdu
|
||||
- sunpengsdu
|
||||
- ZwwWayne
|
||||
- ZwwWayne
|
||||
- yhcc
|
||||
|
|
@ -32,7 +32,7 @@
|
|||
</div>
|
||||
|
||||
<p align="center">
|
||||
👋 加入我们的 <a href="https://discord.gg/xa29JuW87d" target="_blank">Discord</a> 和 <a href="https://github.com/InternLM/InternLM/assets/25839884/a6aad896-7232-4220-ac84-9e070c2633ce" target="_blank">微信社区</a>
|
||||
👋 加入我们的<a href="https://twitter.com/intern_lm" target="_blank">推特</a>、<a href="https://discord.gg/xa29JuW87d" target="_blank">Discord</a> 和 <a href="https://r.vansin.top/?r=internwx" target="_blank">微信社区</a>
|
||||
</p>
|
||||
|
||||
## 简介
|
||||
|
|
@ -119,19 +119,20 @@ streamlit run web_demo.py
|
|||
|
||||
1. 首先安装 LMDeploy:
|
||||
|
||||
```
|
||||
```bash
|
||||
python3 -m pip install lmdeploy
|
||||
```
|
||||
|
||||
2. 快速的部署命令如下:
|
||||
|
||||
```
|
||||
python3 -m lmdeploy.serve.turbomind.deploy InternLM-7B /path/to/internlm-7b/model hf
|
||||
```bash
|
||||
python3 -m lmdeploy.serve.turbomind.deploy internlm-chat-7b /path/to/internlm-7b/model
|
||||
```
|
||||
|
||||
3. 在导出模型后,你可以直接通过如下命令启动服务一个服务并和部署后的模型对话
|
||||
3. 在导出模型后,你可以直接通过如下命令启动服务,并在客户端与AI对话
|
||||
|
||||
```
|
||||
```bash
|
||||
bash workspace/service_docker_up.sh
|
||||
python3 -m lmdeploy.serve.client {server_ip_addresss}:33337
|
||||
```
|
||||
|
||||
|
|
|
|||
21
README.md
21
README.md
|
|
@ -32,13 +32,9 @@
|
|||
</div>
|
||||
|
||||
<p align="center">
|
||||
👋 join us on <a href="https://discord.gg/xa29JuW87d" target="_blank">Discord</a> and <a href="https://github.com/InternLM/InternLM/assets/25839884/a6aad896-7232-4220-ac84-9e070c2633ce" target="_blank">WeChat</a>
|
||||
👋 join us on <a href="https://twitter.com/intern_lm" target="_blank">Twitter</a>, <a href="https://discord.gg/xa29JuW87d" target="_blank">Discord</a> and <a href="https://r.vansin.top/?r=internwx" target="_blank">WeChat</a>
|
||||
</p>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## Introduction
|
||||
|
||||
InternLM has open-sourced a 7 billion parameter base model and a chat model tailored for practical scenarios. The model has the following characteristics:
|
||||
|
|
@ -126,21 +122,22 @@ We use [LMDeploy](https://github.com/InternLM/LMDeploy) to complete the one-clic
|
|||
|
||||
1. First, install LMDeploy:
|
||||
|
||||
```
|
||||
```bash
|
||||
python3 -m pip install lmdeploy
|
||||
```
|
||||
```
|
||||
|
||||
2. Use the following command for quick deployment:
|
||||
|
||||
```
|
||||
python3 -m lmdeploy.serve.turbomind.deploy InternLM-7B /path/to/internlm-7b/model hf
|
||||
```
|
||||
```bash
|
||||
python3 -m lmdeploy.serve.turbomind.deploy internlm-chat-7b /path/to/internlm-chat-7b/model
|
||||
```
|
||||
|
||||
3. After exporting the model, you can start a server and have a conversation with the deployed model using the following command:
|
||||
|
||||
```
|
||||
```bash
|
||||
bash workspace/service_docker_up.sh
|
||||
python3 -m lmdeploy.serve.client {server_ip_addresss}:33337
|
||||
```
|
||||
```
|
||||
|
||||
[LMDeploy](https://github.com/InternLM/LMDeploy) provides a complete workflow for deploying InternLM. Please refer to the [deployment tutorial](https://github.com/InternLM/LMDeploy) for more details on deploying InternLM.
|
||||
|
||||
|
|
|
|||
|
|
@ -1,14 +1,18 @@
|
|||
#!/bin/bash
|
||||
|
||||
export exit_code=0
|
||||
|
||||
function if_exist() {
|
||||
ls -l $file_path
|
||||
exit_code_now=$?
|
||||
exit_code=$(($exit_code + $exit_code_now))
|
||||
}
|
||||
|
||||
function num_files() {
|
||||
file_num=$(ls -l $file_dir |wc -l)
|
||||
echo "there are $file_num files in $file_dir"
|
||||
#######################################
|
||||
# Calculate the number of files in a directory.
|
||||
# Call this function like this: num_files "${file_path}".
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# $1: the directory path
|
||||
# Returns:
|
||||
# the number of files in the directory
|
||||
#######################################
|
||||
num_files() {
|
||||
[[ $# -eq 1 ]] || return 1
|
||||
local file_num
|
||||
file_num=$(ls -l $1 | grep '^-' | wc -l)
|
||||
echo $file_num
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,21 @@
|
|||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
retry_times=3
|
||||
for ((i=1;i<=$retry_times;i++));do
|
||||
jobid=$(squeue -o "%A %j" -u $USER | grep ${GITHUB_RUN_ID}-${GITHUB_JOB} | awk '{print $1}')
|
||||
if [[ -n "$jobid" ]];then
|
||||
echo "The job $jobid will be canceled."
|
||||
scancel $jobid
|
||||
sleep 0.5
|
||||
else
|
||||
echo "There are no more jobs that need to be canceled."
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ $i -gt $retry_times ]];then
|
||||
echo "There have been tried $retry_times times. Please contact user $USER to confirm the job status."
|
||||
fi
|
||||
|
||||
exit 0
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
readonly DATA_VOLUME=$(echo $GITHUB_WORKSPACE | cut -d '/' -f 1-4)/data
|
||||
|
|
@ -1,22 +1,50 @@
|
|||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
rm -rf /mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/*
|
||||
source ./ci_scripts/common/variables.sh
|
||||
[[ -n ${DATA_VOLUME} ]] || { echo "should set DATA_VOLUME first before ci, exit."; exit 1; }
|
||||
|
||||
python tools/alpaca_tokenizer.py /mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/alpaca_data.json /mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result tools/V7_sft.model --split_ratio 0.1
|
||||
readonly SRC_DATASET_META=${DATA_VOLUME}/lm_data/alpaca_data/alpaca_data.json
|
||||
readonly RESULTS=${DATA_VOLUME}/lm_data/alpaca_data/result
|
||||
readonly TRAIN_DATASET=${RESULTS}/train/en/dataset.bin
|
||||
readonly TRAIN_DATASET_META=${RESULTS}/train/en/dataset.bin.meta
|
||||
readonly VALID_DATASET=${RESULTS}/valid/en/dataset.bin
|
||||
readonly VALID_DATASET_META=${RESULTS}/valid/en/dataset.bin.meta
|
||||
|
||||
file_one="/mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/train/en/dataset.bin"
|
||||
file_two="/mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/train/en/dataset.bin.meta"
|
||||
file_three="/mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/valid/en/dataset.bin"
|
||||
file_four="/mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/result/valid/en/dataset.bin.meta"
|
||||
file_list=($file_one $file_two $file_three $file_four)
|
||||
split_ratio=0.1
|
||||
exit_code=0
|
||||
|
||||
source ./ci_scripts/common/basic_func.sh
|
||||
for file_path in ${file_list[@]};
|
||||
do
|
||||
if_exist $file_path
|
||||
done
|
||||
|
||||
if [ $exit_code -ne 0 ]
|
||||
then
|
||||
echo "start to test alpaca_tokenizer.py."
|
||||
|
||||
if [[ -d ${RESULTS} ]]; then
|
||||
if ! rm -rf ${RESULTS}/*; then
|
||||
echo "cleaning test data in ${RESULTS} failed, exit."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ ! -f ${SRC_DATASET_META} ]]; then
|
||||
echo "${SRC_DATASET_META} should be exist, exit."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
python tools/alpaca_tokenizer.py ${SRC_DATASET_META} ${RESULTS} tools/V7_sft.model --split_ratio ${split_ratio}
|
||||
[[ $? -ne 0 ]] && { echo "test alpaca_tokenizer.py failed."; exit_code=$(($exit_code + 1)); }
|
||||
|
||||
file_list=(${TRAIN_DATASET} ${TRAIN_DATASET_META} ${VALID_DATASET} ${VALID_DATASET_META})
|
||||
for file in ${file_list[@]}; do
|
||||
if [[ ! -f ${file} ]]; then
|
||||
echo "expect: ${file} exists, actual: not exist."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
done
|
||||
|
||||
# clean the test files.
|
||||
if ! rm -rf ${RESULTS}/*; then
|
||||
echo "cleaning test data in ${RESULTS} failed."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
|
||||
exit $exit_code
|
||||
|
|
|
|||
|
|
@ -1,19 +1,42 @@
|
|||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
rm -rf /mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/result.*
|
||||
srun -p llm2 python tools/tokenizer.py --text_input_path /mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/raw_data.txt --bin_output_path /mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/result.bin
|
||||
source ./ci_scripts/common/variables.sh
|
||||
[[ -n ${DATA_VOLUME} ]] || { echo "should set DATA_VOLUME first before ci."; exit 1; }
|
||||
|
||||
file_one="/mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/result.bin"
|
||||
file_two="/mnt/petrelfs/qa-caif-cicd/data/lm_data/cn_data/result.bin.meta"
|
||||
file_list=($file_one $file_two)
|
||||
readonly DATA=${DATA_VOLUME}/lm_data/cn_data/raw_data.txt
|
||||
readonly RESULT=${DATA_VOLUME}/lm_data/cn_data/result.bin
|
||||
readonly RESULT_META=${DATA_VOLUME}/lm_data/cn_data/result.bin.meta
|
||||
readonly RESULTS=${DATA_VOLUME}/lm_data/cn_data/result.*
|
||||
exit_code=0
|
||||
|
||||
source ./ci_scripts/common/basic_func.sh
|
||||
for file_path in ${file_list[@]};
|
||||
do
|
||||
if_exist $file_path
|
||||
|
||||
echo "start to test tokenizer.py."
|
||||
|
||||
num=$(num_files "${RESULTS}")
|
||||
if [[ ${num} -gt 0 ]]; then
|
||||
if ! rm -rf ${RESULTS}; then
|
||||
echo "cleaning test data ${RESULTS} failed, exit."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
srun -p ${SLURM_PARTITION} --job-name=$1 --gpus-per-task=1 python tools/tokenizer.py --text_input_path ${DATA} --bin_output_path ${RESULT}
|
||||
[[ $? -ne 0 ]] && { echo "test tokenizer.py failed."; exit_code=$(($exit_code + 1)); }
|
||||
|
||||
file_list=($RESULT $RESULT_META)
|
||||
for file in ${file_list[@]}; do
|
||||
if [[ ! -f ${file} ]]; then
|
||||
echo "expect: ${file} exists, actual: not exist."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
done
|
||||
|
||||
if [ $exit_code -ne 0 ]
|
||||
then
|
||||
exit 1
|
||||
# clean the test files.
|
||||
if ! rm -rf ${RESULTS}/*; then
|
||||
echo "cleaning cached file in ${RESULTS} failed."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
|
||||
exit $exit_code
|
||||
|
|
|
|||
|
|
@ -1,33 +1,47 @@
|
|||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
rm -rf ./hf_ckpt/*
|
||||
python ./tools/transformers/convert2hf.py --src_folder /mnt/petrelfs/qa-caif-cicd/data/lm_data/alpaca_data/llm_ckpts/20 --tgt_folder hf_ckpt/ --tokenizer ./tools/V7_sft.model
|
||||
source ./ci_scripts/common/variables.sh
|
||||
[[ -n ${DATA_VOLUME} ]] || { echo "should set DATA_VOLUME first before ci, exit."; exit 1; }
|
||||
[[ -n ${GITHUB_WORKSPACE} ]] || { echo "should set GITHUB_WORKSPACE first before ci, exit."; exit 1; }
|
||||
|
||||
#assert exists model
|
||||
file_one="$GITHUB_WORKSPACE/hf_ckpt/tokenizer.model"
|
||||
file_two="$GITHUB_WORKSPACE/hf_ckpt/config.json"
|
||||
file_three="$GITHUB_WORKSPACE/hf_ckpt/modeling_internlm.py"
|
||||
file_list=($file_one $file_two $file_three)
|
||||
file_dir="$GITHUB_WORKSPACE/hf_ckpt/*"
|
||||
readonly CKPTS_INPUT="${DATA_VOLUME}/lm_data/alpaca_data/llm_ckpts/20"
|
||||
readonly CKPTS_OUTPUT="${GITHUB_WORKSPACE}/hf_ckpt"
|
||||
readonly TOKENIZER="${GITHUB_WORKSPACE}/hf_ckpt/tokenizer.model"
|
||||
readonly CONFIG="${GITHUB_WORKSPACE}/hf_ckpt/config.json"
|
||||
readonly INERNLM="${GITHUB_WORKSPACE}/hf_ckpt/modeling_internlm.py"
|
||||
exit_code=0
|
||||
expected_num=9
|
||||
|
||||
source ./ci_scripts/common/basic_func.sh
|
||||
|
||||
for file_path in ${file_list[@]};
|
||||
do
|
||||
if_exist $file_path
|
||||
echo "start to test convert2hf.py."
|
||||
|
||||
if [[ -d ${CKPTS_OUTPUT} ]]; then
|
||||
if ! rm -rf ${CKPTS_OUTPUT}/*; then
|
||||
echo "cleaning cached file in ${CKPTS_OUTPUT} failed, exit."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
python ./tools/transformers/convert2hf.py --src_folder ${CKPTS_INPUT} --tgt_folder ${CKPTS_OUTPUT} --tokenizer ./tools/V7_sft.model
|
||||
[[ $? -ne 0 ]] && { echo "test convert2hf.py failed."; exit_code=$(($exit_code + 1)); }
|
||||
|
||||
#assert exists model
|
||||
file_list=($TOKENIZER $CONFIG $INERNLM)
|
||||
for file in ${file_list[@]}; do
|
||||
if [[ ! -f ${file} ]];then
|
||||
echo "file ${file} does not exist."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
done
|
||||
|
||||
num=$(num_files "${CKPTS_OUTPUT}")
|
||||
|
||||
num_files ${file_dir}
|
||||
|
||||
if [ $file_num -ne 9 ]
|
||||
then
|
||||
echo "The num of files is not right"
|
||||
ls -l $file_dir
|
||||
if [[ ${num} -ne ${expected_num} ]]; then
|
||||
echo "expect: ${expected_num} files, actual: ${num} files."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
|
||||
if [ $exit_code -ne 0 ]
|
||||
then
|
||||
exit 1
|
||||
fi
|
||||
# NOTICE: should not remove the cached files, because the cached files will be used in the next test case.
|
||||
exit $exit_code
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
|
||||
#!/usr/bin/env python
|
||||
# -*- encoding: utf-8 -*-
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- encoding: utf-8 -*-
|
||||
from transformers import AutoModel
|
||||
|
||||
model = AutoModel.from_pretrained("../hf_ckpt/", trust_remote_code=True).cuda()
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@ MODEL_ONLY_FOLDER = "local:llm_ckpts/xxxx"
|
|||
SAVE_CKPT_FOLDER = "local:llm_ckpts"
|
||||
# LOAD_CKPT_FOLDER = "local:llm_ckpts/49"
|
||||
ckpt = dict(
|
||||
enable_save_ckpt=True,
|
||||
# Path to save training ckpt.
|
||||
save_ckpt_folder=SAVE_CKPT_FOLDER,
|
||||
# Path to continue training ckpt (load model weights and scheduler/context states).
|
||||
|
|
|
|||
|
|
@ -1,20 +1,37 @@
|
|||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
rm -rf $GITHUB_WORKSPACE/llm_ckpts/20
|
||||
[[ -n ${GITHUB_WORKSPACE} ]] || { echo "should set GITHUB_WORKSPACE first before ci, exit."; exit 1; }
|
||||
readonly CKPTS_PATH="$GITHUB_WORKSPACE/llm_ckpts"
|
||||
readonly CKPTS20_PATH="$GITHUB_WORKSPACE/llm_ckpts/20"
|
||||
readonly CKPTS20_OUTPUT="${CKPTS20_PATH}/*.pt"
|
||||
expected_num=21
|
||||
exit_code=0
|
||||
|
||||
srun -p llm2 --quotatype=spot -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ./ci_scripts/train/ci_7B_sft.py
|
||||
|
||||
file_dir="$GITHUB_WORKSPACE/llm_ckpts/20/*.pt"
|
||||
source ./ci_scripts/common/basic_func.sh
|
||||
|
||||
num_files ${file_dir}
|
||||
echo "start to test slurm training."
|
||||
|
||||
if [ $file_num -ne 21 ]
|
||||
then
|
||||
echo "The num of files is not right"
|
||||
ls -l $file_dir
|
||||
rm -rf $GITHUB_WORKSPACE/llm_ckpts
|
||||
if [[ -d ${CKPTS20_PATH} ]]; then
|
||||
if ! rm -rf ${CKPTS20_PATH}/*; then
|
||||
echo "cleaning cached file in ${CKPTS20_PATH} failed, exit."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
srun -p ${SLURM_PARTITION} --exclusive --job-name=$1 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ./ci_scripts/train/ci_7B_sft.py
|
||||
[[ $? -ne 0 ]] && { echo "test slurm training failed."; exit_code=$(($exit_code + 1)); }
|
||||
|
||||
num=$(num_files "${CKPTS20_OUTPUT}")
|
||||
if [[ ${num} -ne ${expected_num} ]]; then
|
||||
echo "expect: ${expected_num} files, actual: ${num} files."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
|
||||
# clean the test files.
|
||||
if ! rm -rf ${CKPTS_PATH}/*; then
|
||||
echo "cleaning cached file in ${CKPTS_PATH} failed."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
|
||||
exit $exit_code
|
||||
|
|
|
|||
|
|
@ -1,17 +1,37 @@
|
|||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
rm -rf $GITHUB_WORKSPACE/llm_ckpts/20
|
||||
srun -p llm2 -N 1 torchrun --nnodes=1 --nproc_per_node=8 --master_port=29501 train.py --config ./ci_scripts/train/ci_7B_sft.py --launcher "torch"
|
||||
[[ -n ${GITHUB_WORKSPACE} ]] || { echo "should set GITHUB_WORKSPACE first before ci, exit."; exit 1; }
|
||||
readonly CKPTS_PATH="$GITHUB_WORKSPACE/llm_ckpts"
|
||||
readonly CKPTS20_PATH="$GITHUB_WORKSPACE/llm_ckpts/20"
|
||||
readonly CKPTS_OUTPUT="${CKPTS20_PATH}/*.pt"
|
||||
expected_num=21
|
||||
exit_code=0
|
||||
|
||||
file_dir="$GITHUB_WORKSPACE/llm_ckpts/20/*.pt"
|
||||
source ./ci_scripts/common/basic_func.sh
|
||||
|
||||
num_files ${file_dir}
|
||||
echo "start to test torch training."
|
||||
|
||||
if [ $file_num -ne 21 ]
|
||||
then
|
||||
echo "The num of files is not right"
|
||||
ls -l $file_dir
|
||||
rm -rf $GITHUB_WORKSPACE/llm_ckpts
|
||||
if [[ -d ${CKPTS20_PATH} ]]; then
|
||||
if ! rm -rf ${CKPTS20_PATH}/*; then
|
||||
echo "cleaning cached file in ${CKPTS20_PATH} failed, exit."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
srun -p ${SLURM_PARTITION} --exclusive --job-name=$1 -N 1 torchrun --nnodes=1 --nproc_per_node=8 --master_port=29501 train.py --config ./ci_scripts/train/ci_7B_sft.py --launcher torch
|
||||
[[ $? -ne 0 ]] && { echo "test torch training failed."; exit_code=$(($exit_code + 1)); }
|
||||
|
||||
num=$(num_files "${CKPTS_OUTPUT}")
|
||||
if [[ ${num} -ne ${expected_num} ]]; then
|
||||
echo "expect: ${expected_num} files, actual: ${num} files."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
|
||||
# clean the test files.
|
||||
if ! rm -rf ${CKPTS_PATH}/*; then
|
||||
echo "cleaning cached file in ${CKPTS_PATH} failed."
|
||||
exit_code=$(($exit_code + 1))
|
||||
fi
|
||||
|
||||
exit $exit_code
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ beta2_scheduler = dict(
|
|||
)
|
||||
|
||||
model = dict(
|
||||
checkpoint=False,
|
||||
checkpoint=False, # The proportion of layers for activation aheckpointing, the optional value are True/False/[0-1]
|
||||
num_attention_heads=NUM_ATTENTION_HEAD,
|
||||
embed_split_hidden=True,
|
||||
vocab_size=VOCAB_SIZE,
|
||||
|
|
|
|||
|
|
@ -140,7 +140,7 @@ HIDDEN_SIZE = 4096
|
|||
NUM_LAYER = 32
|
||||
MLP_RATIO = 8 / 3
|
||||
model = dict(
|
||||
checkpoint=False,
|
||||
checkpoint=False, # The proportion of layers for activation aheckpointing, the optional value are True/False/[0-1]
|
||||
num_attention_heads=NUM_ATTENTION_HEAD,
|
||||
embed_split_hidden=True,
|
||||
vocab_size=VOCAB_SIZE,
|
||||
|
|
|
|||
|
|
@ -126,7 +126,7 @@ HIDDEN_SIZE = 4096
|
|||
NUM_LAYER = 32
|
||||
MLP_RATIO = 8 / 3
|
||||
model = dict(
|
||||
checkpoint=False,
|
||||
checkpoint=False, # 进行重计算的模型层数比例,可选值为 True/False/[0-1]
|
||||
num_attention_heads=NUM_ATTENTION_HEAD,
|
||||
embed_split_hidden=True,
|
||||
vocab_size=VOCAB_SIZE,
|
||||
|
|
|
|||
|
|
@ -547,6 +547,8 @@ def send_backward_and_recv_next_backward_async(
|
|||
|
||||
|
||||
class AsynCommunicator:
|
||||
"""AsynCommunicator for managing async communication."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
tensor_to_send: Union[torch.Tensor, List[torch.Tensor]],
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
# adopted from https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/communication
|
||||
|
||||
from functools import wraps
|
||||
from typing import List, Tuple, Union
|
||||
|
||||
import torch
|
||||
|
|
|
|||
|
|
@ -158,13 +158,6 @@ class SchedulerMetricHook(SchedulerHook):
|
|||
self._post_func = metric
|
||||
self._skip = skip
|
||||
|
||||
if skip:
|
||||
# init timer only.
|
||||
timer("fwd")
|
||||
timer("bwd")
|
||||
timer("cal_loss")
|
||||
timer("post_fn")
|
||||
|
||||
def before_forward(self, scheduler, inputs) -> None:
|
||||
if not self._skip:
|
||||
timer("fwd").start()
|
||||
|
|
@ -190,8 +183,5 @@ class SchedulerMetricHook(SchedulerHook):
|
|||
timer("bwd").stop()
|
||||
|
||||
def post_helper_func(self, scheduler, outputs, label) -> None:
|
||||
if not self._skip:
|
||||
timer("post_fn").start()
|
||||
if self._post_func is not None:
|
||||
self._post_func(outputs, label)
|
||||
timer("post_fn").stop()
|
||||
|
|
|
|||
|
|
@ -108,6 +108,9 @@ def args_sanity_check():
|
|||
logger.info(f"valid_every: {data.valid_every}")
|
||||
|
||||
# processing the checkpoint config
|
||||
if "enable_save_ckpt" not in gpc.config.ckpt:
|
||||
gpc.config.ckpt._add_item("enable_save_ckpt", False)
|
||||
|
||||
if "checkpoint_every" not in gpc.config.ckpt or gpc.config.ckpt.checkpoint_every <= 0:
|
||||
gpc.config.ckpt._add_item("checkpoint_every", float("inf"))
|
||||
|
||||
|
|
@ -125,21 +128,19 @@ def args_sanity_check():
|
|||
|
||||
if "async_upload" not in gpc.config.ckpt:
|
||||
gpc.config.ckpt._add_item("async_upload", False)
|
||||
else:
|
||||
|
||||
if "async_upload_tmp_folder" not in gpc.config.ckpt:
|
||||
gpc.config.ckpt._add_item("async_upload_tmp_folder", "/dev/shm/internlm_tmp_ckpt/")
|
||||
|
||||
if gpc.config.ckpt.async_upload:
|
||||
assert "save_ckpt_folder" in gpc.config.ckpt
|
||||
if "boto3:" not in gpc.config.ckpt.save_ckpt_folder:
|
||||
if gpc.is_rank_for_log():
|
||||
logger.warning(
|
||||
"Storing ckpt on file system does not support asynchronous storage, will use sync save!"
|
||||
)
|
||||
logger.warning("Storing ckpt on file system does not support asynchronous storage, will use sync save!")
|
||||
gpc.config.ckpt.async_upload = False
|
||||
else:
|
||||
if "async_upload_tmp_folder" not in gpc.config.ckpt:
|
||||
gpc.config.ckpt._add_item("async_upload_tmp_folder", "/dev/shm/internlm_tmp_ckpt/")
|
||||
|
||||
if "snapshot_ckpt_folder" not in gpc.config.ckpt:
|
||||
gpc.config.ckpt._add_item("snapshot_ckpt_folder", os.path.join(gpc.config.ckpt.save_ckpt_folder), "snapshot")
|
||||
gpc.config.ckpt._add_item("snapshot_ckpt_folder", os.path.join(gpc.config.ckpt.save_ckpt_folder, "snapshot"))
|
||||
|
||||
if "oss_snapshot_freq" not in gpc.config.ckpt and gpc.config.ckpt.checkpoint_every != float("inf"):
|
||||
gpc.config.ckpt._add_item("oss_snapshot_freq", gpc.config.ckpt.checkpoint_every / 2)
|
||||
|
|
@ -149,14 +150,14 @@ def args_sanity_check():
|
|||
gpc.config.ckpt.load_ckpt_folder is not None and gpc.config.ckpt.load_model_only_folder is not None
|
||||
), "'load_ckpt_folder' and 'load_model_only_folder' cannot be set at the same time."
|
||||
|
||||
if "enable_save_ckpt" not in gpc.config.ckpt:
|
||||
gpc.config.ckpt._add_item("enable_save_ckpt", False)
|
||||
|
||||
if gpc.is_rank_for_log():
|
||||
logger.info("+" * 15 + " Ckpt Info " + "+" * 15) # pylint: disable=W1201
|
||||
logger.info(f"is enable save ckpt: {gpc.config.ckpt.enable_save_ckpt}")
|
||||
logger.info(f"save_ckpt_folder: {gpc.config.ckpt.save_ckpt_folder}")
|
||||
logger.info(f"checkpoint_every: {gpc.config.ckpt.checkpoint_every}")
|
||||
logger.info(f"async_upload: {gpc.config.ckpt.async_upload}")
|
||||
if gpc.config.ckpt.async_upload:
|
||||
logger.info(f"async_upload_tmp_folder: {gpc.config.ckpt.async_upload_tmp_folder}")
|
||||
|
||||
# initialization storage manager
|
||||
init_storage_manager(gpc.config.ckpt)
|
||||
|
|
@ -180,9 +181,10 @@ def args_sanity_check():
|
|||
logger.info(f"cudnn.deterministic: {torch.backends.cudnn.deterministic }")
|
||||
logger.info(f"clip_grad_norm: {clip_grad_norm}")
|
||||
|
||||
if "dtype" not in gpc.config.model:
|
||||
model = gpc.config.model
|
||||
if "dtype" not in model:
|
||||
logger.warning("dtype is not set, use torch.float16 by defalut!")
|
||||
gpc.config.model._add_item("dtype", torch.float16)
|
||||
model._add_item("dtype", torch.float16)
|
||||
else:
|
||||
if gpc.config.model.dtype == "torch.bfloat16":
|
||||
gpc.config.model.dtype = torch.bfloat16
|
||||
|
|
@ -205,6 +207,16 @@ def args_sanity_check():
|
|||
"torch.tf32",
|
||||
]
|
||||
|
||||
if "checkpoint" in model:
|
||||
if model.checkpoint is True:
|
||||
model.checkpoint = 1
|
||||
elif model.checkpoint is False:
|
||||
model.checkpoint = 0
|
||||
else:
|
||||
assert (
|
||||
model.checkpoint >= 0 and model.checkpoint <= 1
|
||||
), f'model.checkpoint: "{model.checkpoint}" should >=0 and <=1'
|
||||
|
||||
if gpc.is_rank_for_log():
|
||||
logger.info("+" * 15 + " Model Info " + "+" * 15) # pylint: disable=W1201
|
||||
logger.info(f"Model: {gpc.config.model}")
|
||||
|
|
|
|||
|
|
@ -312,9 +312,8 @@ class PackedFlashInternLm1D(nn.Module):
|
|||
attn_drop_rate (float): The dropout rate of attention module. 0.0 by default.
|
||||
drop_rate (float): The dropout rate of input hidden state. 0.0 by default.
|
||||
dtype (torch.dtype): The type of data. torch.float by default.
|
||||
checkpoint (bool): Whether to use checkpointing to save VRAM. True by default.
|
||||
checkpoint_fraction (float): The proportion of layers that need to be checkpointed compared to the total number
|
||||
of layers. 1.0 by default.
|
||||
checkpoint (float): The proportion of layers that need to be checkpointed compared to the total number
|
||||
of layers. 0.0 by default.
|
||||
layer_norm_epsilon (float): A value added to the denominator for numerical stability. 1e-6 by default.
|
||||
first (bool): Whether input embedding layer or not. False by default.
|
||||
last (bool): Whether output embedding layer or not. False by default.
|
||||
|
|
@ -350,8 +349,7 @@ class PackedFlashInternLm1D(nn.Module):
|
|||
attn_drop_rate: float = 0.0,
|
||||
drop_rate: float = 0.0,
|
||||
dtype: torch.dtype = torch.float,
|
||||
checkpoint: bool = False,
|
||||
checkpoint_fraction: float = 1.0,
|
||||
checkpoint: float = 0.0,
|
||||
layer_norm_epsilon: float = 1e-5,
|
||||
first: bool = False,
|
||||
last: bool = False,
|
||||
|
|
@ -379,12 +377,8 @@ class PackedFlashInternLm1D(nn.Module):
|
|||
):
|
||||
super().__init__()
|
||||
|
||||
self.use_flash_attn = use_flash_attn
|
||||
if checkpoint_fraction <= 0:
|
||||
checkpoint = False
|
||||
if not checkpoint:
|
||||
checkpoint_fraction = 0
|
||||
checkpoint_layer_num = num_layers * checkpoint_fraction
|
||||
checkpoint_layer_num = int(num_layers * checkpoint)
|
||||
|
||||
if is_reward:
|
||||
head_cls = RewardModelLinear
|
||||
else:
|
||||
|
|
@ -524,11 +518,6 @@ def _build_generic_model_1d(num_layers, num_chunks, device=torch.device("cuda"),
|
|||
|
||||
models = []
|
||||
|
||||
if kwargs["checkpoint"] is True:
|
||||
kwargs["checkpoint_fraction"] = 1.0
|
||||
else:
|
||||
kwargs["checkpoint_fraction"] = 0
|
||||
|
||||
for start, end in parts:
|
||||
kwargs["num_layers"] = end - start
|
||||
kwargs["first"] = start == 0
|
||||
|
|
@ -551,7 +540,7 @@ def _build_generic_model_1d(num_layers, num_chunks, device=torch.device("cuda"),
|
|||
@MODEL_INITIALIZER.register_module(module_name=MODEL_TYPE)
|
||||
def build_model_with_cfg(
|
||||
num_chunks=1,
|
||||
checkpoint=False,
|
||||
checkpoint=0.0,
|
||||
dtype=torch.float,
|
||||
embed_split_hidden=False,
|
||||
num_layers=48,
|
||||
|
|
|
|||
|
|
@ -7,23 +7,25 @@ from torch.nn import init
|
|||
from torch.nn.parameter import Parameter
|
||||
|
||||
|
||||
def manual_rms_norm(input, normalized_shape, weight, eps):
|
||||
def manual_rms_norm(my_input, normalized_shape, weight, eps):
|
||||
# layer norm should always be calculated in float32
|
||||
dims = tuple(i for i in range(-1, -len(normalized_shape) - 1, -1))
|
||||
variance = input.to(torch.float32).pow(2).mean(dims, keepdim=True)
|
||||
input = input * torch.rsqrt(variance + eps)
|
||||
variance = my_input.to(torch.float32).pow(2).mean(dims, keepdim=True)
|
||||
my_input = my_input * torch.rsqrt(variance + eps)
|
||||
|
||||
if weight is None:
|
||||
return input
|
||||
return my_input
|
||||
|
||||
# convert into half-precision if necessary
|
||||
if weight.dtype in [torch.float16, torch.bfloat16]:
|
||||
input = input.to(weight.dtype)
|
||||
my_input = my_input.to(weight.dtype)
|
||||
|
||||
return weight * input
|
||||
return weight * my_input
|
||||
|
||||
|
||||
class RMSNormTorch(torch.nn.Module):
|
||||
"""A custom PyTorch module for RMS normalization."""
|
||||
|
||||
def __init__(self, normalized_shape, eps=1e-5):
|
||||
super().__init__()
|
||||
|
||||
|
|
@ -34,8 +36,8 @@ class RMSNormTorch(torch.nn.Module):
|
|||
self.weight = Parameter(torch.empty(*normalized_shape))
|
||||
self.reset_parameters()
|
||||
|
||||
def forward(self, input: torch.Tensor):
|
||||
return manual_rms_norm(input, self.normalized_shape, self.weight, self.eps)
|
||||
def forward(self, _input: torch.Tensor):
|
||||
return manual_rms_norm(_input, self.normalized_shape, self.weight, self.eps)
|
||||
|
||||
def reset_parameters(self):
|
||||
init.ones_(self.weight)
|
||||
|
|
|
|||
|
|
@ -88,15 +88,17 @@ def gather_forward_split_backward(input_, parallel_mode, dim):
|
|||
return _GatherForwardSplitBackward.apply(input_, parallel_mode, dim)
|
||||
|
||||
|
||||
def linear_bias_wgrad_torch(input, grad_output, has_d_bias):
|
||||
assert input.dtype == grad_output.dtype
|
||||
grad_weight = torch.matmul(grad_output.t(), input)
|
||||
def linear_bias_wgrad_torch(my_input, grad_output, has_d_bias):
|
||||
assert my_input.dtype == grad_output.dtype
|
||||
grad_weight = torch.matmul(grad_output.t(), my_input)
|
||||
grad_bias = grad_output.sum(dim=0) if has_d_bias else None
|
||||
return grad_weight, grad_bias
|
||||
|
||||
|
||||
# adpated from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/ops/fused_dense.py
|
||||
class FusedDenseFuncTorch(FusedDenseFunc):
|
||||
"""A custom PyTorch module extending FusedDenseFunc."""
|
||||
|
||||
@staticmethod
|
||||
@custom_bwd
|
||||
def backward(ctx, grad_output, *args):
|
||||
|
|
@ -173,8 +175,8 @@ class _SplitForwardGatherBackward(torch.autograd.Function):
|
|||
"""
|
||||
|
||||
@staticmethod
|
||||
def symbolic(graph, input_):
|
||||
return _split(input_)
|
||||
def symbolic(input_):
|
||||
return _split(input_, parallel_mode=None)
|
||||
|
||||
@staticmethod
|
||||
def forward(ctx, input_, parallel_mode, dim):
|
||||
|
|
@ -201,7 +203,7 @@ def try_import_RMSNorm():
|
|||
|
||||
return RMSNorm
|
||||
except ModuleNotFoundError:
|
||||
logger.warn("The torch implementation for MixFusedRMSNorm is slower than apex. Please note this!")
|
||||
logger.warning("The torch implementation for MixFusedRMSNorm is slower than apex. Please note this!")
|
||||
from internlm.model.norm import RMSNormTorch as RMSNorm
|
||||
|
||||
return RMSNorm
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- encoding: utf-8 -*-
|
||||
|
||||
import math
|
||||
from functools import partial
|
||||
|
||||
import torch
|
||||
|
|
@ -33,6 +34,7 @@ from internlm.utils.megatron_timers import megatron_timer as timer
|
|||
|
||||
from .utils import compute_norm
|
||||
|
||||
inf = math.inf
|
||||
logger = get_logger(__file__)
|
||||
|
||||
|
||||
|
|
@ -166,26 +168,26 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
# partition these param groups for data parallel training
|
||||
# and add buffers to parameter store for future access
|
||||
for group_id, param_group in enumerate(self.optim.param_groups):
|
||||
if "moe" in param_group.keys() and param_group["moe"]:
|
||||
print("true", flush=True)
|
||||
continue
|
||||
|
||||
group_params = param_group["params"]
|
||||
|
||||
# add the fp16 params to fp16_param_groups for bookkeeping
|
||||
self._fp16_param_groups[group_id] = group_params
|
||||
|
||||
# assign parameters to ranks the params in the list are sorted
|
||||
params_per_rank, no_params_ranks = self._partition_param_list(group_params)
|
||||
params_per_rank, no_params_ranks = self._partition_param_list(param_group)
|
||||
self.param_group_no_params_ranks.append(no_params_ranks)
|
||||
self.param_group_has_params.append(self._zero_local_rank not in no_params_ranks)
|
||||
|
||||
# store the mapping between param to rank each param should belong to only one rank
|
||||
# store the mapping between param to rank each param should belong to only one rank.
|
||||
# we can skip the moe param and do not keep them in _param_store to save memory
|
||||
# (means we need to deal with moe param in a different way), but it will increase
|
||||
# complexity and reduce code readablity.
|
||||
for rank, params in enumerate(params_per_rank):
|
||||
# check whether any rank is not assigned params.
|
||||
if len(params) != 0:
|
||||
self._param_store.add_fp16_param_list_by_rank_group(rank, group_id, params)
|
||||
for param in params:
|
||||
setattr(param, "group_id", group_id)
|
||||
self._param_store.set_param_to_rank(param, rank)
|
||||
|
||||
# move to cpu to make room to create the flat tensor
|
||||
|
|
@ -261,12 +263,27 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
def num_param_groups(self):
|
||||
return len(self._fp16_param_groups)
|
||||
|
||||
def _partition_param_list(self, param_list):
|
||||
def _get_real_dp_process_group(self, param_groups):
|
||||
if "moe" in param_groups.keys() and param_groups["moe"]:
|
||||
return ParallelMode.EXPERT_DATA
|
||||
else:
|
||||
return ParallelMode.DATA
|
||||
|
||||
def _partition_param_list(self, param_group):
|
||||
no_params_ranks = []
|
||||
params_per_rank = [[] for _ in range(self._zero_world_size)]
|
||||
numel_per_rank = [0 for _ in range(self._zero_world_size)]
|
||||
self.params_per_rank_id_dict.append([[] for _ in range(self._zero_world_size)])
|
||||
param_list = param_group["params"]
|
||||
|
||||
if "moe" in param_group.keys() and param_group["moe"]:
|
||||
# just add current params to params_per_rank[_zero_local_rank]
|
||||
params_per_rank[self._zero_local_rank] = list(param_list)
|
||||
self.params_per_rank_id_dict[-1][self._zero_local_rank].append(None)
|
||||
no_params_ranks = list(range(self._zero_world_size))
|
||||
no_params_ranks.pop(self._zero_world_size)
|
||||
|
||||
else:
|
||||
sorted_params = sorted(param_list, key=lambda x: x.numel(), reverse=True)
|
||||
for i, param in enumerate(sorted_params):
|
||||
global_id = str(i)
|
||||
|
|
@ -296,6 +313,7 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
for group_id in range(self.num_param_groups):
|
||||
param_group = self._fp16_param_groups[group_id]
|
||||
for param in param_group:
|
||||
# we should not reduce the param in moe
|
||||
if param.requires_grad and not is_moe_param(param):
|
||||
reduce_rank = None
|
||||
|
||||
|
|
@ -327,7 +345,7 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
# if full, will reduce the grads already in the bucket
|
||||
# after reduction, the bucket will be empty
|
||||
if self._bucket_store.num_elements_in_bucket(reduce_rank) + param_size > self._reduce_bucket_size:
|
||||
self._reduce_grads_stored_in_bucket(reduce_rank)
|
||||
self._reduce_grads_stored_in_bucket(reduce_rank, last_bucket=False)
|
||||
|
||||
# the param must not be reduced to ensure correctness
|
||||
is_param_reduced = self._param_store.is_param_reduced(param)
|
||||
|
|
@ -345,7 +363,7 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
self._bucket_store.add_grad(param.grad, reduce_rank)
|
||||
self._bucket_store.add_param(param, reduce_rank)
|
||||
|
||||
def _reduce_grads_stored_in_bucket(self, reduce_rank=None):
|
||||
def _reduce_grads_stored_in_bucket(self, reduce_rank=None, last_bucket=False):
|
||||
# reduce grads
|
||||
self._reduce_grads_by_rank(
|
||||
reduce_rank=reduce_rank,
|
||||
|
|
@ -353,14 +371,6 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
bucket_size=self._bucket_store.num_elements_in_bucket(reduce_rank),
|
||||
)
|
||||
|
||||
# use communication stream if overlapping
|
||||
# communication with computation
|
||||
if self._overlap_communication:
|
||||
stream = self._comm_stream
|
||||
else:
|
||||
stream = torch.cuda.current_stream()
|
||||
|
||||
with torch.cuda.stream(stream):
|
||||
params_in_bucket = self._bucket_store.get_param(reduce_rank=reduce_rank)
|
||||
|
||||
for param in params_in_bucket:
|
||||
|
|
@ -378,6 +388,11 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
# update the flag
|
||||
self._param_store.set_param_reduction_state(param, True)
|
||||
|
||||
if self._param_store.belongs_to_current_rank(param):
|
||||
self._param_store.add_reduced_param_for_compute_norm(param, last_bucket)
|
||||
else:
|
||||
self._param_store.add_previous_reduced_param(param)
|
||||
|
||||
self._bucket_store.reset_by_rank(reduce_rank)
|
||||
|
||||
def _reduce_grads_by_rank(self, reduce_rank, grads, bucket_size):
|
||||
|
|
@ -395,9 +410,9 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
|
||||
def _reduce_and_copy(self, bucket: TensorBucket, reduce_rank):
|
||||
if self._overlap_communication:
|
||||
torch.cuda.synchronize()
|
||||
self._param_store.clear_grads_of_previous_reduced_params()
|
||||
stream = self._comm_stream
|
||||
stream.synchronize()
|
||||
self._param_store.clear_grads_of_previous_reduced_params()
|
||||
else:
|
||||
stream = torch.cuda.current_stream()
|
||||
|
||||
|
|
@ -431,6 +446,7 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
reduction_states = self._param_store.get_param_reduction_states()
|
||||
for tensor, _ in reduction_states.items():
|
||||
reduction_states[tensor] = False
|
||||
self._param_store.reset_reduced_data_for_compute_norm()
|
||||
|
||||
# accumulate gradient
|
||||
avg_gradients = self._grad_store._averaged_gradients
|
||||
|
|
@ -479,6 +495,30 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
|
||||
# Gradients may not be fully synchronized here.
|
||||
|
||||
def _compute_norm_with_stage(
|
||||
self,
|
||||
group_id: int = 0,
|
||||
last_bucket: bool = False,
|
||||
last_stage: bool = False,
|
||||
previous_norm=None,
|
||||
):
|
||||
# compute norm for gradients that have been reduced
|
||||
params, grads = self._param_store.get_reduced_param_for_compute_norm(group_id=group_id, last_bucket=last_bucket)
|
||||
if len(params) == 0:
|
||||
grads = [self.padding_grad]
|
||||
params = [self.padding_tensor]
|
||||
|
||||
if self._clip_grad_norm > 0:
|
||||
# this norm is before scaling, it will be very large
|
||||
norm = compute_norm(
|
||||
gradients=grads,
|
||||
parameters=params,
|
||||
last_stage=last_stage,
|
||||
previous_norm=previous_norm,
|
||||
)
|
||||
|
||||
return norm
|
||||
|
||||
def step(self, closure=None):
|
||||
"""Performs a single optimization step.
|
||||
|
||||
|
|
@ -490,27 +530,43 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
"""
|
||||
assert closure is None, "closure is not supported by step()"
|
||||
|
||||
timer("sync_grad").start()
|
||||
# if not overlapping communication (no reduction hook is attached)
|
||||
# we need to manually reduce these gradients
|
||||
if not self._overlap_communication:
|
||||
for group_id in range(len(self._fp16_param_groups)):
|
||||
for param in self._fp16_param_groups[group_id]:
|
||||
# we should not reduce the param in moe
|
||||
if param.grad is not None and not is_moe_param(param):
|
||||
self._store_and_try_reduce_grads_by_bucket(param)
|
||||
|
||||
# we need to reduce the gradients left in the communication bucket
|
||||
self._reduce_grads_stored_in_bucket()
|
||||
self._reduce_grads_stored_in_bucket(reduce_rank=None, last_bucket=True)
|
||||
|
||||
# compute norm for gradients in the before bucket
|
||||
groups_norms = []
|
||||
for group_id in range(self.num_param_groups):
|
||||
groups_norms.append(self._compute_norm_with_stage(group_id=group_id))
|
||||
|
||||
# clear reduced grads
|
||||
if self._overlap_communication:
|
||||
torch.cuda.synchronize()
|
||||
# grads in the last bucket is reduced
|
||||
self._comm_stream.synchronize()
|
||||
self._param_store.clear_grads_of_previous_reduced_params()
|
||||
|
||||
# compute norm for gradients in the last bucket
|
||||
total_norms = []
|
||||
for group_id in range(self.num_param_groups):
|
||||
total_norms.append(
|
||||
self._compute_norm_with_stage(
|
||||
group_id=group_id, last_bucket=True, last_stage=True, previous_norm=groups_norms[group_id]
|
||||
)
|
||||
)
|
||||
|
||||
timer("sync_grad").start()
|
||||
self._sync_grad()
|
||||
timer("sync_grad").stop()
|
||||
|
||||
return self._step(closure=closure)
|
||||
return self._step(closure=closure, norms=total_norms)
|
||||
|
||||
def _get_norm_with_moe_layers(self, norm_groups):
|
||||
# all_groups_norm_old = all_groups_norm
|
||||
|
|
@ -525,39 +581,18 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
# print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}")
|
||||
return all_groups_norm
|
||||
|
||||
def _step(self, closure=None):
|
||||
def _step(self, closure=None, norms=None):
|
||||
assert closure is None, "closure is not supported by step()"
|
||||
|
||||
# check for overflow
|
||||
found_inf = self._check_overflow()
|
||||
found_inf = False
|
||||
# if there is INF values in grades, compute_norm func would also returns -1
|
||||
# thus, we try to avoid call _check_overflow here
|
||||
# found_inf = self._check_overflow()
|
||||
# Because you may encounter inf when computing norm
|
||||
timer("cal_norm").start()
|
||||
norm_groups = []
|
||||
for group_id in range(self.num_param_groups):
|
||||
# compute norm
|
||||
if self._zero_local_rank not in self.param_group_no_params_ranks[group_id]:
|
||||
gradients = self._grad_store.get_averaged_gradients_by_group(group_id)
|
||||
parameters = self._param_store.get_fp16_params_by_rank_group(
|
||||
group_id=group_id, rank=self._zero_local_rank
|
||||
)
|
||||
else:
|
||||
# in order to prevent collection communication from hanging,
|
||||
# we need to involve rank that are not assigned parameters in compute_norm(),
|
||||
# so we give them a fp16 vector of 0 values.
|
||||
gradients = [self.padding_grad]
|
||||
parameters = [self.padding_tensor]
|
||||
|
||||
if self._clip_grad_norm > 0:
|
||||
# this norm is before scaling, it will be very large
|
||||
norm_group = compute_norm(
|
||||
gradients=gradients,
|
||||
parameters=parameters,
|
||||
)
|
||||
if norm_group == -1:
|
||||
timer("cal_norm").stop()
|
||||
if -1 in norms:
|
||||
found_inf = True
|
||||
break
|
||||
norm_groups.append(norm_group)
|
||||
|
||||
loss_scale = float(self.loss_scale.item()) # backup
|
||||
if gpc.config.model.dtype is not torch.float32:
|
||||
|
|
@ -573,7 +608,6 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
|
||||
# copy the grad of fp16 param to fp32 param
|
||||
single_grad_partition_groups = []
|
||||
global_norm = 0
|
||||
for group_id in range(self.num_param_groups):
|
||||
# compute norm
|
||||
# The following operations are performed only on the rank to which parameters are assigned.
|
||||
|
|
@ -582,13 +616,14 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
|
||||
# create flat gradient for the flat fp32 params
|
||||
gradients = self._grad_store.get_averaged_gradients_by_group(group_id)
|
||||
with torch.no_grad():
|
||||
flat_fp16_avg_grads = flatten(gradients)
|
||||
self._grad_store.reset_average_gradients_by_group(group_id)
|
||||
del gradients # release cuda memory
|
||||
gradients = None # release cuda memory
|
||||
|
||||
dtype = self._fp32_flat_param_groups_of_current_rank[group_id].dtype
|
||||
flat_fp32_avg_grads = flat_fp16_avg_grads.to(dtype)
|
||||
del flat_fp16_avg_grads # release cuda memory
|
||||
flat_fp16_avg_grads = None # release cuda memory
|
||||
|
||||
param_shape = self._fp32_flat_param_groups_of_current_rank[group_id].shape
|
||||
assert (
|
||||
|
|
@ -601,8 +636,10 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
|
||||
# unscale and clip grads
|
||||
# get the global norm
|
||||
global_norm_groups = []
|
||||
if self._clip_grad_norm > 0:
|
||||
global_norm = sum(norm_groups) ** 0.5
|
||||
for norm in norms:
|
||||
global_norm_groups.append(norm**0.5)
|
||||
|
||||
if self.has_moe:
|
||||
global_norm = self._get_norm_with_moe_layers(global_norm)
|
||||
|
|
@ -610,9 +647,8 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
# the following operations are performed only on the rank to which parameters are assigned.
|
||||
if gpc.config.model.dtype is not torch.float32:
|
||||
if len(single_grad_partition_groups) != 0:
|
||||
self._unscale_and_clip_grads(single_grad_partition_groups, global_norm, loss_scale)
|
||||
self._unscale_and_clip_grads(single_grad_partition_groups, global_norm_groups, loss_scale)
|
||||
|
||||
timer("cal_norm").stop()
|
||||
# update the parameters
|
||||
timer("step").start()
|
||||
|
||||
|
|
@ -637,7 +673,7 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
timer("step").stop()
|
||||
# update gradients may not be needed here, because the sync_params function is used in initialization,
|
||||
# so synchronization is maintained
|
||||
return True, global_norm / loss_scale
|
||||
return True, [global_norm / loss_scale for global_norm in global_norm_groups]
|
||||
|
||||
def broadcast_params(self, overlap=False):
|
||||
handles = []
|
||||
|
|
@ -681,18 +717,20 @@ class HybridZeroOptimizer(BaseOptimizer):
|
|||
|
||||
return self._found_overflow.item() > 0
|
||||
|
||||
def _unscale_and_clip_grads(self, grad_groups_flat, total_norm, loss_scale):
|
||||
def _unscale_and_clip_grads(self, grad_groups_flat, total_norm_groups, loss_scale):
|
||||
# compute combined scale factor for this group
|
||||
combined_scale = loss_scale
|
||||
combined_scale_groups = []
|
||||
|
||||
if self._clip_grad_norm > 0.0:
|
||||
# norm is in fact norm*scale
|
||||
for group_id, total_norm in enumerate(total_norm_groups):
|
||||
combined_scale_groups.append(loss_scale)
|
||||
clip = ((total_norm / loss_scale) + 1e-6) / self._clip_grad_norm
|
||||
if clip > 1.0:
|
||||
combined_scale = clip * loss_scale
|
||||
combined_scale_groups[group_id] = clip * loss_scale
|
||||
|
||||
for grad in grad_groups_flat:
|
||||
grad.data.mul_(1.0 / combined_scale)
|
||||
for group_id, grad in enumerate(grad_groups_flat):
|
||||
grad.data.mul_(1.0 / combined_scale_groups[group_id])
|
||||
|
||||
def clip_grad_norm(self, model, max_norm):
|
||||
# will conduct in the step()
|
||||
|
|
|
|||
|
|
@ -152,6 +152,11 @@ class ParameterStore(BaseStore):
|
|||
self._is_param_reduced = dict()
|
||||
self._reduced_param = []
|
||||
|
||||
self._former_bucket_reduced_param = {}
|
||||
self._last_bucket_reduced_param = {}
|
||||
self._former_bucket_reduced_grad = {}
|
||||
self._last_bucket_reduced_grad = {}
|
||||
|
||||
def set_param_to_rank(self, tensor: Tensor, rank: int) -> None:
|
||||
"""
|
||||
Set the mapping between parameter to rank, each parameter should be owned by a rank.
|
||||
|
|
@ -223,6 +228,39 @@ class ParameterStore(BaseStore):
|
|||
def add_previous_reduced_param(self, tensor):
|
||||
self._reduced_param.append(tensor)
|
||||
|
||||
def add_reduced_param_for_compute_norm(self, param, last_bucket=False):
|
||||
group_id = getattr(param, "group_id")
|
||||
if last_bucket:
|
||||
if group_id not in self._last_bucket_reduced_param:
|
||||
self._last_bucket_reduced_param[group_id] = []
|
||||
self._last_bucket_reduced_grad[group_id] = []
|
||||
|
||||
self._last_bucket_reduced_param[group_id].append(param)
|
||||
self._last_bucket_reduced_grad[group_id].append(param.grad)
|
||||
else:
|
||||
if group_id not in self._former_bucket_reduced_param:
|
||||
self._former_bucket_reduced_param[group_id] = []
|
||||
self._former_bucket_reduced_grad[group_id] = []
|
||||
|
||||
self._former_bucket_reduced_param[group_id].append(param)
|
||||
self._former_bucket_reduced_grad[group_id].append(param.grad)
|
||||
|
||||
def get_reduced_param_for_compute_norm(self, group_id=0, last_bucket=False):
|
||||
if not last_bucket:
|
||||
if group_id not in self._former_bucket_reduced_param:
|
||||
return [], []
|
||||
return self._former_bucket_reduced_param[group_id], self._former_bucket_reduced_grad[group_id]
|
||||
else:
|
||||
if group_id not in self._last_bucket_reduced_param:
|
||||
return [], []
|
||||
return self._last_bucket_reduced_param[group_id], self._last_bucket_reduced_grad[group_id]
|
||||
|
||||
def reset_reduced_data_for_compute_norm(self):
|
||||
self._former_bucket_reduced_param = {}
|
||||
self._last_bucket_reduced_param = {}
|
||||
self._former_bucket_reduced_grad = {}
|
||||
self._last_bucket_reduced_grad = {}
|
||||
|
||||
def clear_grads_of_previous_reduced_params(self):
|
||||
if len(self._reduced_param) > 0:
|
||||
for param in self._reduced_param:
|
||||
|
|
|
|||
|
|
@ -21,9 +21,10 @@ logger = get_logger(__file__)
|
|||
try:
|
||||
import amp_C
|
||||
from apex.multi_tensor_apply import multi_tensor_applier
|
||||
|
||||
APEX_AVAILABLE = True
|
||||
except (ModuleNotFoundError, ImportError):
|
||||
logger.warn("The torch implementation for cal_l2norm is slower than apex. Please note this!")
|
||||
logger.warning("The torch implementation for cal_l2norm is slower than apex. Please note this!")
|
||||
APEX_AVAILABLE = False
|
||||
|
||||
inf = math.inf
|
||||
|
|
@ -162,6 +163,7 @@ def sync_param(flat_tensor, tensor_list):
|
|||
for p, q in zip(tensor_list, updated_params):
|
||||
p.data = q.data
|
||||
|
||||
|
||||
def multi_tensor_l2norm_torch(tensor_list, per_tensor):
|
||||
# Convert tensor_list elements to torch.float32
|
||||
tensor_list = [tensor.float() for tensor in tensor_list]
|
||||
|
|
@ -175,6 +177,7 @@ def multi_tensor_l2norm_torch(tensor_list, per_tensor):
|
|||
|
||||
return l2_norm, per_tensor_norm
|
||||
|
||||
|
||||
def calc_l2_norm(grads):
|
||||
norm = 0.0
|
||||
if len(grads) > 0:
|
||||
|
|
@ -187,6 +190,7 @@ def calc_l2_norm(grads):
|
|||
norm, _ = multi_tensor_l2norm_torch(grads, False)
|
||||
return norm
|
||||
|
||||
|
||||
def calc_lp(grads, norm_type):
|
||||
norm = 0.0
|
||||
for grad in grads:
|
||||
|
|
@ -195,7 +199,7 @@ def calc_lp(grads, norm_type):
|
|||
return norm
|
||||
|
||||
|
||||
def compute_norm(gradients, parameters, norm_type=2):
|
||||
def compute_norm(gradients, parameters, last_stage=False, previous_norm=None, norm_type=2):
|
||||
"""Get the norm
|
||||
Arguments:
|
||||
gradients (Iterable[Tensor]): The gradient value.
|
||||
|
|
@ -215,6 +219,13 @@ def compute_norm(gradients, parameters, norm_type=2):
|
|||
if norm_type == inf:
|
||||
total_norm = max(g.data.abs().max() for g in gradients)
|
||||
total_norm_cuda = torch.FloatTensor([float(total_norm)], device=gradients[0].device)
|
||||
|
||||
if last_stage is False:
|
||||
return total_norm_cuda
|
||||
|
||||
if previous_norm is not None:
|
||||
total_norm_cuda = max(total_norm_cuda, previous_norm)
|
||||
|
||||
# Take max across all model-parallel GPUs.
|
||||
if gpc.get_world_size(ParallelMode.MODEL) > 1:
|
||||
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=gpc.get_group(ParallelMode.MODEL))
|
||||
|
|
@ -261,6 +272,12 @@ def compute_norm(gradients, parameters, norm_type=2):
|
|||
|
||||
total_norm = tensor_parallel_norm
|
||||
|
||||
if last_stage is False:
|
||||
return total_norm
|
||||
|
||||
if previous_norm is not None:
|
||||
total_norm = total_norm + previous_norm
|
||||
|
||||
# Sum across all model-parallel GPUs.
|
||||
if gpc.is_initialized(ParallelMode.MODEL):
|
||||
dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.MODEL))
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ def sync_tensor(tensor, parallel_mode):
|
|||
|
||||
|
||||
# TODO: will be used in expert data parallel, may can also used in sync_model_param_within_tp
|
||||
def sync_model_param_within_ep(model):
|
||||
def sync_model_param_with_ep(model):
|
||||
r"""Make sure data parameters are consistent during Data Parallel Mode.
|
||||
|
||||
Args:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,670 @@
|
|||
import os
|
||||
import time
|
||||
from collections import OrderedDict
|
||||
from functools import partial
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
import pyecharts
|
||||
import torch
|
||||
|
||||
from internlm.core.context import ParallelMode
|
||||
from internlm.core.context import global_context as gpc
|
||||
from internlm.solver.pipeline_utils import partition_uniform
|
||||
|
||||
mb = 1024 * 1024
|
||||
|
||||
|
||||
class SimpleMemState:
|
||||
"""
|
||||
A class to represent the memory state of a model layer.
|
||||
|
||||
Args:
|
||||
layer_name (str): The name of the layer.
|
||||
layer_mem (int): The memory usage of the layer in bytes.
|
||||
"""
|
||||
|
||||
def __init__(self, layer_name: str, layer_mem: int = 0) -> None:
|
||||
self.layer_name = layer_name
|
||||
|
||||
# Memory status of the current model layer.
|
||||
self._layer_mem: int = layer_mem
|
||||
# Total memory status of the model and sub-models, initialized with layer memory.
|
||||
self._total_mem: int = self._layer_mem
|
||||
# SimpleMemState of sub-models.
|
||||
self.sub_model_stats = OrderedDict()
|
||||
|
||||
@property
|
||||
def layer_mem(self) -> int:
|
||||
"""
|
||||
Get the memory usage of the layer.
|
||||
|
||||
Returns:
|
||||
int: The memory usage of the layer in bytes.
|
||||
"""
|
||||
return self._layer_mem
|
||||
|
||||
@layer_mem.setter
|
||||
def layer_mem(self, new_layer_mem: int) -> None:
|
||||
"""
|
||||
Set the memory usage of the layer.
|
||||
|
||||
Args:
|
||||
new_layer_mem (int): The new memory usage of the layer in bytes.
|
||||
"""
|
||||
diff = new_layer_mem - self._layer_mem
|
||||
self._layer_mem = new_layer_mem
|
||||
self._total_mem += diff
|
||||
|
||||
@property
|
||||
def total_mem(self) -> int:
|
||||
"""
|
||||
Get the total memory usage of the model and sub-models.
|
||||
|
||||
Returns:
|
||||
int: The total memory usage in bytes.
|
||||
"""
|
||||
return self._total_mem
|
||||
|
||||
def add(self, layer_name: str, layer_mem: int = 0, flush: bool = True) -> None:
|
||||
"""
|
||||
Add a layer to the memory state.
|
||||
|
||||
Args:
|
||||
layer_name (str): The name of the layer.
|
||||
layer_mem (int, optional): The memory usage of the layer in bytes. Defaults to 0.
|
||||
flush (bool, optional): Whether to update the total memory usage. Defaults to True.
|
||||
"""
|
||||
path = layer_name.split(".")
|
||||
|
||||
target = self.find_layer_state(path, create=True)
|
||||
target.layer_mem = layer_mem
|
||||
|
||||
if flush:
|
||||
self.update_total_memory()
|
||||
|
||||
def delete(self, layer_name: str, flush: bool = True) -> None:
|
||||
"""
|
||||
Delete a layer from the memory state.
|
||||
|
||||
Args:
|
||||
layer_name (str): The name of the layer.
|
||||
flush (bool, optional): Whether to update the total memory usage. Defaults to True.
|
||||
"""
|
||||
path = layer_name.split(".")
|
||||
assert len(path) >= 2, f"Only support deleting non-root layers, layer_name: {layer_name}"
|
||||
|
||||
parent_path = path[0:-1]
|
||||
layer = path[-1]
|
||||
parent = self.find_layer_state(parent_path)
|
||||
|
||||
if parent is not None and layer in parent.sub_model_stats:
|
||||
del parent.sub_model_stats[layer]
|
||||
|
||||
if flush:
|
||||
self.update_total_memory()
|
||||
|
||||
def update_total_memory(self) -> None:
|
||||
"""
|
||||
Update the total memory usage of the model and sub-models.
|
||||
"""
|
||||
for stat in self.sub_model_stats.values():
|
||||
# Update sub-model status first.
|
||||
stat.update_total_memory()
|
||||
# Add sub-model total_mem to model total_mem.
|
||||
self._total_mem += stat._total_mem
|
||||
|
||||
def find_layer_state(self, path: Tuple[str], create: bool = False) -> "SimpleMemState":
|
||||
"""
|
||||
Find the memory state of a layer.
|
||||
|
||||
Args:
|
||||
path (Tuple[str]): The path to the layer.
|
||||
create (bool, optional): Whether to create the layer if it doesn't exist. Defaults to False.
|
||||
|
||||
Returns:
|
||||
SimpleMemState: The memory state of the layer.
|
||||
"""
|
||||
current_node = self
|
||||
|
||||
for _node in path:
|
||||
if _node not in current_node.sub_model_stats:
|
||||
if not create:
|
||||
return None
|
||||
# Create a layer node.
|
||||
current_node.sub_model_stats[_node] = SimpleMemState(_node)
|
||||
|
||||
current_node = current_node.sub_model_stats[_node]
|
||||
|
||||
return current_node
|
||||
|
||||
def dump(self, prefix: str = "") -> str:
|
||||
"""
|
||||
Dump the memory state of the model and sub-models.
|
||||
|
||||
Args:
|
||||
prefix (str, optional): The prefix to add to the layer names. Defaults to "".
|
||||
|
||||
Returns:
|
||||
str: The memory state information.
|
||||
"""
|
||||
cur_prefix = prefix + "." + self.layer_name if prefix != "" else self.layer_name
|
||||
res = f"layer: {cur_prefix}, layer_mem: {self.layer_mem / mb:.2f} MB, total_mem: {self.total_mem / mb:.2f} MB\n"
|
||||
|
||||
for sub_layer in self.sub_model_stats.values():
|
||||
res += sub_layer.dump(cur_prefix)
|
||||
|
||||
return res
|
||||
|
||||
def to_json(self, base: int = 1024 * 1024) -> dict:
|
||||
"""
|
||||
Convert the memory state to a JSON structure.
|
||||
|
||||
Returns:
|
||||
dict: The JSON structure of the memory state.
|
||||
"""
|
||||
children = [child.to_json() for child in self.sub_model_stats.values()]
|
||||
if len(children) == 0:
|
||||
return {"name": self.layer_name, "value": self.layer_mem // base}
|
||||
else:
|
||||
return {"name": self.layer_name, "children": children}
|
||||
|
||||
|
||||
class SimpleMemoryProfiler:
|
||||
"""
|
||||
A memory profiler for a llm model.
|
||||
|
||||
Args:
|
||||
model (torch.nn.Module): The model to profile.
|
||||
optimizer (torch.optim.Optimizer): The optimizer used for training the model.
|
||||
log_file (str): The file to write the memory state information to.
|
||||
activation_config (List[str], optional): The list of activation layers to track. Defaults to None.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: torch.nn.Module,
|
||||
optimizer: torch.optim.Optimizer,
|
||||
log_folder: str,
|
||||
total_steps: int = 5,
|
||||
activation_config: List[str] = None,
|
||||
):
|
||||
self._model = model
|
||||
self._optimizer = optimizer
|
||||
self._log_folder = log_folder
|
||||
self._remaining_steps = total_steps
|
||||
|
||||
self._stoped = False
|
||||
self._record_start_time = time.time()
|
||||
|
||||
# For activation memory state.
|
||||
self._activation_config = activation_config
|
||||
self._activation_mem_inited: bool = False
|
||||
self._activation_mem: int = 0
|
||||
self._activation_max_count = 0
|
||||
self._activation_base_mem: SimpleMemState = SimpleMemState("activations")
|
||||
|
||||
# Check or create log folder
|
||||
os.makedirs(self._log_folder, exist_ok=True)
|
||||
|
||||
# Register activation memory tracking hooks
|
||||
self._register_activation_trace_hooks()
|
||||
|
||||
# Calculate static parameter cuda memory
|
||||
self._param_mem_state = SimpleMemState("param_mem")
|
||||
self._calc_tensor_memory(self._param_mem_state, self._model.named_parameters())
|
||||
# Calculate static grad cuda memory
|
||||
self._grad_mem_state = SimpleMemState("grad_mem")
|
||||
self._calc_tensor_memory(self._grad_mem_state, self._model.named_parameters(), True)
|
||||
# Calculate static optimizer state cuda memory
|
||||
self._os_params_mem_state = SimpleMemState("os_params_mem")
|
||||
self._os_state_mem_state = SimpleMemState("os_state_mem")
|
||||
self._calc_tensor_group_memory(self._os_params_mem_state, list(enumerate(self._optimizer.param_groups)))
|
||||
|
||||
# Generate the first memory record
|
||||
self.point(create=True)
|
||||
|
||||
def point(self, with_options: str = "", create: bool = False) -> None:
|
||||
"""
|
||||
Record the memory state.
|
||||
|
||||
Args:
|
||||
with_options (str, optional): The options to include in the memory state. Defaults to "".
|
||||
create (bool, optional): Whether to create a new memory record file. Defaults to False.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
now = time.time()
|
||||
file = f"{self._log_folder}/memory.log"
|
||||
|
||||
if with_options == "all":
|
||||
options = ["params", "grads", "os_params", "os_state", "activation_base"]
|
||||
else:
|
||||
options = with_options.split(",")
|
||||
|
||||
total_mem = (
|
||||
self._param_mem_state.total_mem
|
||||
+ self._grad_mem_state.total_mem
|
||||
+ self._os_params_mem_state.total_mem
|
||||
+ self._os_state_mem_state.total_mem
|
||||
+ self._activation_mem
|
||||
) / mb
|
||||
|
||||
# Generate summary information for memory state
|
||||
summary_info = (
|
||||
f"total_memory: {total_mem:.2f} MB"
|
||||
+ "\n"
|
||||
+ f"params_memory: {self._param_mem_state.total_mem / mb:.2f} MB, "
|
||||
+ f"grads_memory: {self._grad_mem_state.total_mem / mb:.2f} MB, "
|
||||
+ f"os_params_memory: {self._os_params_mem_state.total_mem / mb:.2f} MB, "
|
||||
+ f"os_state_memory: {self._os_state_mem_state.total_mem / mb:.2f} MB, "
|
||||
+ f"activation_memory: {self._activation_mem / mb:.2f} MB"
|
||||
)
|
||||
|
||||
# Generate layout information based on selected options
|
||||
layout_info = ""
|
||||
if "params" in options:
|
||||
layout_info += "params_layout:\n" + self._param_mem_state.dump()
|
||||
if "grads" in options:
|
||||
layout_info += "grads_layout:\n" + self._grad_mem_state.dump()
|
||||
if "os_params" in options:
|
||||
layout_info += "os_params_layout:\n" + self._os_params_mem_state.dump()
|
||||
if "os_state" in options:
|
||||
layout_info += "os_state_layout:\n" + self._os_state_mem_state.dump()
|
||||
if "activation_base" in options:
|
||||
layout_info += "activation_base_layout:\n" + self._activation_base_mem.dump()
|
||||
|
||||
# Write memory state information to log file
|
||||
file_mode = "w" if create else "a"
|
||||
with open(file, file_mode, encoding="utf-8") as writer:
|
||||
writer.write(
|
||||
"Memory State:\n" + f"time: {now - self._record_start_time}\n" + "---summary---\n" + summary_info + "\n"
|
||||
)
|
||||
if layout_info != "":
|
||||
writer.write("---Layout---\n" + layout_info)
|
||||
writer.write("\n")
|
||||
|
||||
def step(self) -> None:
|
||||
"""
|
||||
Update the memory state of the optimizer state.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
if self._stoped:
|
||||
return
|
||||
|
||||
self._remaining_steps -= 1
|
||||
if self._remaining_steps == 0:
|
||||
self._stoped = True
|
||||
|
||||
# Update os state memory usage
|
||||
self._os_state_mem_state = SimpleMemState("os_state_mem")
|
||||
self._calc_tensor_group_memory(self._os_state_mem_state, list(self._optimizer.state_dict()["state"].items()))
|
||||
|
||||
if not self._stoped:
|
||||
# Do we need to print os_state_layout every time? Is it always constant?
|
||||
self.point(with_options="os_state")
|
||||
else:
|
||||
# Dump memory layout
|
||||
self.point(with_options="all")
|
||||
# Generate sunburst charts
|
||||
self._render_sunburst_chart(self._param_mem_state.to_json()["children"], "params_memory_sunburst")
|
||||
self._render_sunburst_chart(self._grad_mem_state.to_json()["children"], "grads_memory_sunburst")
|
||||
self._render_sunburst_chart(
|
||||
[self._os_params_mem_state.to_json(), self._os_state_mem_state.to_json()],
|
||||
"os_memory_sunburst",
|
||||
)
|
||||
self._render_sunburst_chart(self._activation_base_mem.to_json()["children"], "activation_memory_sunburst")
|
||||
# Generate summary sunburst chart
|
||||
summary_sunburst_data = [
|
||||
{"name": "params", "value": self._param_mem_state.total_mem // mb},
|
||||
{"name": "grads", "value": self._grad_mem_state.total_mem // mb},
|
||||
{"name": "os_params", "value": self._os_params_mem_state.total_mem // mb},
|
||||
{"name": "os_state", "value": self._os_state_mem_state.total_mem // mb},
|
||||
{"name": "activation", "value": self._activation_base_mem.total_mem // mb},
|
||||
]
|
||||
|
||||
self._render_sunburst_chart(summary_sunburst_data, "summary_sunburst")
|
||||
|
||||
def _render_sunburst_chart(self, data: Any, name: str) -> None:
|
||||
pyecharts.charts.Sunburst(init_opts=pyecharts.options.InitOpts(width="1000px", height="1000px")).add(
|
||||
name,
|
||||
data_pair=data,
|
||||
highlight_policy="ancestor",
|
||||
radius=[0, "95%"],
|
||||
levels=[
|
||||
{},
|
||||
{
|
||||
"r0": "10%",
|
||||
"r": "40%",
|
||||
"itemStyle": {"borderWidth": 3},
|
||||
"label": {"align": "left"},
|
||||
},
|
||||
{"r0": "40%", "r": "65%", "label": {"align": "left"}},
|
||||
{"r0": "65%", "r": "80%", "label": {"align": "left"}},
|
||||
{"r0": "80%", "r": "90%", "label": {"align": "left"}},
|
||||
{
|
||||
"r0": "90%",
|
||||
"r": "92%",
|
||||
"label": {"position": "outside", "padding": 3, "silent": False},
|
||||
"itemStyle": {"borderWidth": 3},
|
||||
},
|
||||
],
|
||||
).set_global_opts(title_opts=pyecharts.options.TitleOpts(title="CUDA Memory")).set_series_opts(
|
||||
label_opts=pyecharts.options.LabelOpts(formatter="{b}")
|
||||
).render(
|
||||
f"{self._log_folder}/{name}.html"
|
||||
)
|
||||
|
||||
def _inner_activation_trace_hook(self, layer_name: str, model: Any, inputs: Any, output: torch.Tensor) -> None:
|
||||
"""
|
||||
Hook function to trace the activation memory usage for a inner layer.
|
||||
|
||||
Args:
|
||||
layer_name (str): The name of the layer.
|
||||
model (Any): The model.
|
||||
inputs (Any): The inputs to the layer.
|
||||
output (torch.Tensor): The output tensor.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
del model, inputs
|
||||
assert isinstance(output, torch.Tensor), f"Invalid output type: {type(output)}"
|
||||
|
||||
if self._stoped or self._activation_mem_inited:
|
||||
return
|
||||
|
||||
# Delay updating the total_mem of activation_base_mem here, it will be handled in the forward ending hook.
|
||||
self._activation_base_mem.add(layer_name, output.element_size() * output.nelement(), flush=False)
|
||||
|
||||
def _activation_trace_hook_forward(self, model: Any, inputs: Any, output: torch.Tensor) -> None:
|
||||
"""
|
||||
Hook function to trace the activation memory usage for a forward pass.
|
||||
|
||||
Args:
|
||||
model (Any): The model.
|
||||
inputs (Any): The inputs to the model.
|
||||
output (torch.Tensor): The output tensor.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
del model, inputs
|
||||
assert isinstance(output, torch.Tensor), f"invalid output type: {type(output)}"
|
||||
|
||||
if self._stoped:
|
||||
return
|
||||
|
||||
# Check if the activation memory has been initialized
|
||||
if self._activation_mem_inited is False:
|
||||
# Update the total memory of the activation base memory state
|
||||
self._activation_base_mem.update_total_memory()
|
||||
# Set with_options to "activation_base" to include activation_base_layout in the memory dump
|
||||
self._activation_mem_inited = True
|
||||
|
||||
# Accumulate activation memory usage for each forward pass
|
||||
self._activation_mem += self._activation_base_mem.total_mem
|
||||
|
||||
# Update activation max count
|
||||
if self._activation_mem // self._activation_base_mem.total_mem > self._activation_max_count:
|
||||
self._activation_max_count = self._activation_mem // self._activation_base_mem.total_mem
|
||||
|
||||
# Trigger a memory record
|
||||
self.point()
|
||||
|
||||
def _activation_tarce_hook_backward(self, model: Any, inputs: Any, grad_outputs: Any) -> None:
|
||||
"""
|
||||
Hook function to trace the activation memory usage for a backward pass.
|
||||
|
||||
Args:
|
||||
model (Any): The model.
|
||||
inputs (Any): The inputs to the model.
|
||||
grad_outputs (Any): The gradients of the outputs.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
del model, inputs, grad_outputs
|
||||
|
||||
if self._stoped:
|
||||
return
|
||||
|
||||
# Release activation memory usage for each backward pass
|
||||
self._activation_mem -= self._activation_base_mem.total_mem
|
||||
|
||||
# Trigger a memory record
|
||||
self.point()
|
||||
|
||||
def _register_activation_trace_hooks(self) -> None:
|
||||
"""
|
||||
Register activation trace hooks for the model and each submodule in the model.
|
||||
"""
|
||||
|
||||
# Register inner activation trace hooks for each submodule in the model
|
||||
for layer_name in self._activation_config:
|
||||
# Register a hook for every activation
|
||||
model = self._model
|
||||
sub_models = layer_name.split(".")
|
||||
# Get the target sub-model
|
||||
for sub_model_name in sub_models:
|
||||
try:
|
||||
model = model.get_submodule(sub_model_name)
|
||||
except AttributeError:
|
||||
model = None
|
||||
break
|
||||
|
||||
# Register the hook
|
||||
if model is not None:
|
||||
model.register_forward_hook(partial(self._inner_activation_trace_hook, layer_name))
|
||||
|
||||
# Register a forward hook for the main model to track activation memory usage
|
||||
self._model.register_forward_hook(self._activation_trace_hook_forward)
|
||||
# Register a backward hook for the main model to release activation memory usage
|
||||
self._model.register_full_backward_hook(self._activation_tarce_hook_backward)
|
||||
|
||||
def _calc_tensor_memory(
|
||||
self, root_stat: SimpleMemState, named_tensors: Dict[str, torch.Tensor], require_grad: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Calculate the memory usage of tensors and update the memory state.
|
||||
|
||||
Args:
|
||||
root_stat (SimpleMemState): The root memory state.
|
||||
named_tensors (Dict[str, torch.Tensor]): A dictionary containing the named tensors.
|
||||
require_grad (bool, optional): Whether to consider tensors with gradients. Defaults to False.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
for name, tensor in named_tensors:
|
||||
if require_grad and not tensor.requires_grad:
|
||||
continue
|
||||
|
||||
layer_splits = name.split(sep=".")
|
||||
layer_stat = root_stat.find_layer_state(layer_splits, create=True)
|
||||
layer_stat.layer_mem = tensor.element_size() * tensor.nelement()
|
||||
|
||||
root_stat.update_total_memory()
|
||||
|
||||
def _calc_tensor_group_memory(self, root_stat: SimpleMemState, tensor_groups: List[Tuple[int, torch.Tensor]]):
|
||||
"""
|
||||
Calculate the memory usage of a group of tensors.
|
||||
|
||||
Args:
|
||||
root_stat (SimpleMemState): The root memory state.
|
||||
tensor_groups (List[Tuple[int, torch.Tensor]]): A list of tuples containing the tensor groups.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
|
||||
def _normalize_helper(named_tensors: Dict[str, Any]) -> List[Tuple[str, Any]]:
|
||||
"""
|
||||
Normalize the named tensors.
|
||||
|
||||
Args:
|
||||
named_tensors (Dict[str, Any]): The named tensors to normalize.
|
||||
|
||||
Returns:
|
||||
List[Tuple[str, Any]]: The normalized named tensors.
|
||||
"""
|
||||
res = {}
|
||||
|
||||
for name, tensors in named_tensors.items():
|
||||
if isinstance(tensors, torch.Tensor):
|
||||
res[name] = tensors
|
||||
elif isinstance(tensors, (list, tuple)):
|
||||
for index, tensor in enumerate(tensors):
|
||||
res[f"{name}.{index}"] = tensor
|
||||
elif isinstance(tensors, dict):
|
||||
for subname, tensor in tensors.items():
|
||||
res[f"{name}.{subname}"] = tensor
|
||||
else:
|
||||
raise TypeError(f"unsupported normalize value type: {type(tensors)}")
|
||||
|
||||
return list(res.items())
|
||||
|
||||
def _value_check(tensor_or_tensors):
|
||||
"""
|
||||
Check if the input is a tensor or a collection of tensors.
|
||||
|
||||
Args:
|
||||
tensor_or_tensors (Any): The input to check.
|
||||
|
||||
Returns:
|
||||
bool: True if the input is a tensor or a collection of tensors, False otherwise.
|
||||
"""
|
||||
if torch.is_tensor(tensor_or_tensors):
|
||||
return True
|
||||
elif isinstance(tensor_or_tensors, (list, tuple)) and all(torch.is_tensor(x) for x in tensor_or_tensors):
|
||||
return True
|
||||
elif isinstance(tensor_or_tensors, dict) and all(torch.is_tensor(x) for x in tensor_or_tensors.values()):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
# Calculate the memory usage of a group of tensors.
|
||||
for idx, tensors in tensor_groups:
|
||||
# Normalize the named tensors
|
||||
named_tensors = {f"{idx}.{k}": v for k, v in tensors.items() if _value_check(v)}
|
||||
named_tensors = _normalize_helper(named_tensors)
|
||||
# Calculate the memory usage of the tensors and update the memory state
|
||||
self._calc_tensor_memory(root_stat, named_tensors)
|
||||
|
||||
|
||||
def build_activation_config(num_layers: int, num_chunks: int = 1) -> List[str]:
|
||||
# TODO: support interleaved pipeline scheduling.
|
||||
assert num_chunks == 1, "Only support num_chunks == 1"
|
||||
|
||||
if gpc.is_initialized(ParallelMode.PIPELINE):
|
||||
pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
|
||||
pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
|
||||
else:
|
||||
pipeline_size = 1
|
||||
pipeline_rank = 0
|
||||
|
||||
all_parts = partition_uniform(num_layers, pipeline_size, num_chunks)
|
||||
parts = all_parts[pipeline_rank]
|
||||
start, end = parts[0]
|
||||
num_blocks = end - start
|
||||
|
||||
block_conf_tmpl = [
|
||||
"mixer.rotary_emb",
|
||||
"mixer.Wqkv",
|
||||
"mixer.inner_attn",
|
||||
"mixer.inner_cross_attn",
|
||||
"mixer.out_proj",
|
||||
# "dropout1", # skip when dropout_selective_checkpoint is True
|
||||
# "dropout2", # skip when dropout_selective_checkpoint is True
|
||||
"norm1",
|
||||
"norm2",
|
||||
"mlp.w1",
|
||||
"mlp.w2",
|
||||
"mlp.w3",
|
||||
]
|
||||
|
||||
block_conf = []
|
||||
for block_id in range(num_blocks):
|
||||
block_conf += [f"blocks.{block_id}.{layer}" for layer in block_conf_tmpl]
|
||||
|
||||
# We don't need to care about whether the embedding, norm, and head layers exist in the model after partitioning.
|
||||
# If they don't exist, they will be automatically ignored when registering activation trace hooks.
|
||||
activation_conf = ["embedding", "norm", "head"] + block_conf
|
||||
|
||||
return activation_conf
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
class SimpleModel(torch.nn.Module):
|
||||
"""
|
||||
A simple model with three linear layers.
|
||||
|
||||
Args:
|
||||
skip_layer2 (bool, optional): Whether to skip layer2. Defaults to False.
|
||||
"""
|
||||
|
||||
def __init__(self, skip_layer2: bool = False):
|
||||
super().__init__()
|
||||
self.layer1 = torch.nn.Linear(5120, 5120, True)
|
||||
self.layer3 = torch.nn.Linear(5120, 5120, False)
|
||||
|
||||
if skip_layer2:
|
||||
self.layer2 = None
|
||||
else:
|
||||
self.layer2 = SimpleModel(skip_layer2=True)
|
||||
|
||||
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
|
||||
"""
|
||||
Forward pass of the model.
|
||||
|
||||
Args:
|
||||
inputs (torch.Tensor): The input tensor.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: The output tensor.
|
||||
"""
|
||||
output1 = self.layer1(inputs)
|
||||
if self.layer2 is not None:
|
||||
output2 = self.layer2(output1)
|
||||
else:
|
||||
output2 = output1
|
||||
output = self.layer3(output2)
|
||||
|
||||
return output
|
||||
|
||||
# init model and optimizer
|
||||
_model: torch.nn.Module = SimpleModel()
|
||||
_optimizer = torch.optim.Adam(_model.parameters())
|
||||
|
||||
# create activation config for simple model layer by layer.
|
||||
activation_configs = [
|
||||
# model level 0
|
||||
"layer1",
|
||||
"layer2",
|
||||
"layer3",
|
||||
# model level 1
|
||||
"layer2.layer1",
|
||||
"layer2.layer3",
|
||||
]
|
||||
|
||||
_model.modules()
|
||||
|
||||
# init profiler
|
||||
profiler = SimpleMemoryProfiler(_model, _optimizer, "./test_simple_memory_profiler.log", activation_configs)
|
||||
|
||||
_optimizer.zero_grad()
|
||||
|
||||
x1 = torch.randn((128, 5120))
|
||||
x2 = torch.randn((128, 5120))
|
||||
out1 = _model(x1)
|
||||
out2 = _model(x2)
|
||||
out1.mean().backward()
|
||||
out2.mean().backward()
|
||||
|
||||
_optimizer.step()
|
||||
|
||||
# Update the optimizer state memory usage and record the memory state
|
||||
profiler.step()
|
||||
|
|
@ -383,12 +383,12 @@ class StorageManager(metaclass=SingletonMeta):
|
|||
}
|
||||
CLI_DICT = {}
|
||||
|
||||
def __init__(self, enable_save, tmp_local_folde="/dev/shm/test/", async_mode=True, n_async_workers=8) -> None:
|
||||
def __init__(self, enable_save, tmp_local_folder="/dev/shm/test/", async_mode=True, n_async_workers=8) -> None:
|
||||
self._exception_list = []
|
||||
self._to_be_del_files = []
|
||||
self._async_stack = []
|
||||
self.upload_count = 0
|
||||
self.tmp_local_folder = tmp_local_folde
|
||||
self.tmp_local_folder = tmp_local_folder
|
||||
self.async_mode = async_mode
|
||||
self.has_warning = False
|
||||
|
||||
|
|
@ -523,7 +523,6 @@ class StorageManager(metaclass=SingletonMeta):
|
|||
pass
|
||||
|
||||
async def _sync_tasks(self) -> Awaitable[None]:
|
||||
|
||||
if not self._async_stack:
|
||||
return
|
||||
|
||||
|
|
@ -591,7 +590,7 @@ def init_storage_manager(ckpt_config):
|
|||
global storage_manager
|
||||
storage_manager = StorageManager(
|
||||
ckpt_config.enable_save_ckpt,
|
||||
tmp_local_folde=ckpt_config.async_upload_tmp_folder,
|
||||
tmp_local_folder=ckpt_config.async_upload_tmp_folder,
|
||||
async_mode=ckpt_config.async_upload,
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,26 @@
|
|||
import signal
|
||||
|
||||
|
||||
class Timeout:
|
||||
"""Timer to execute code
|
||||
|
||||
Adapted from https://github.com/reasoning-machines/pal
|
||||
|
||||
Args:
|
||||
seconds (float): The maximum seconds to execute code
|
||||
error_message (str)
|
||||
"""
|
||||
|
||||
def __init__(self, seconds=1, error_message="Timeout"):
|
||||
self.seconds = seconds
|
||||
self.error_message = error_message
|
||||
|
||||
def timeout_handler(self, signum, frame):
|
||||
raise TimeoutError(self.error_message)
|
||||
|
||||
def __enter__(self):
|
||||
signal.signal(signal.SIGALRM, self.timeout_handler)
|
||||
signal.alarm(self.seconds)
|
||||
|
||||
def __exit__(self, error_type, value, traceback):
|
||||
signal.alarm(0)
|
||||
|
|
@ -12,4 +12,5 @@ packaging
|
|||
boto3
|
||||
botocore
|
||||
torch-scatter
|
||||
pyecharts
|
||||
-f https://data.pyg.org/whl/torch-1.13.0+cu117.html
|
||||
|
|
@ -1,4 +1,5 @@
|
|||
本目录提供辅助模型训练的一些工具,文件结构如下所示:
|
||||
|
||||
```bash
|
||||
├── transformers # 适配hugging face的transformers的一些工具
|
||||
│ ├── configuration_internlm.py # config适配工具
|
||||
|
|
@ -9,9 +10,11 @@
|
|||
```
|
||||
|
||||
# tokenizer.py
|
||||
|
||||
生成原始数据的`bin`和`meta`文件需要使用`tokenizer`,我们通过在`tools/tokenizer.py`中指定模型参数路径的方式来导入tokenizer模型。目前我们提供了`V7_sft.model`来生成tokens。若想使用不同的模型,可直接修改`tokernizer.py`中的模型参数路径。
|
||||
|
||||
可以运行以下命令生成原始数据对应的`bin`和`meta`文件,其中参数`text_input_path`表示原始文本数据路径,目前支持`txt`、`json`和`jsonl`三种输入格式,`bin_output_path`表示生成的`bin`文件的保存路径。
|
||||
|
||||
```bash
|
||||
$ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_path your_output_bin_path
|
||||
```
|
||||
|
|
@ -19,6 +22,7 @@ $ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_
|
|||
下面是一个数据处理的例子:
|
||||
|
||||
给定一个包含原始数据集的文件`raw_data.txt`,原始数据集如下所示:
|
||||
|
||||
```bash
|
||||
感恩生活中的每一个细节,才能真正体会到幸福的滋味。
|
||||
梦想是人生的动力源泉,努力追逐,才能实现自己的目标。
|
||||
|
|
@ -35,19 +39,73 @@ $ python tools/tokenizer.py --text_input_path raw_data.txt --bin_output_path cn/
|
|||
其中,`cn`表示中文数据集;`en`表示英文数据集;`code`表示代码数据集;`ja`表示日语数据集;`ar`表示阿拉伯语数据集;`kaoshi`表示考试数据集。
|
||||
|
||||
生成的bin文件的格式如下:
|
||||
|
||||
```python
|
||||
{"tokens": [73075, 75302, 69522, 69022, 98899, 67713, 68015, 81269, 74637, 75445, 99157]}
|
||||
{"tokens": [69469, 60355, 73026, 68524, 60846, 61844, 98899, 67775, 79241, 98899, 67713, 67800, 67453, 67838, 99157]}
|
||||
{"tokens": [68057, 79017, 60378, 68014, 98899, 67713, 67990, 68015, 70381, 67428, 61003, 67622, 99157]}
|
||||
```
|
||||
|
||||
`bin`文件中的每一行均对应原始数据集中的每一个句子,表示每个句子的`token`(下文将用sequence指定)。
|
||||
|
||||
生成的`meta`文件的格式如下:
|
||||
|
||||
```bash
|
||||
(0, 11), (90, 15), (208, 13)
|
||||
```
|
||||
|
||||
在`meta`文件中,每个元组对应着`bin`文件中每一个`sequence`的元信息。其中,元组的第一个元素表示每个`sequence`在所有`sequence`中的`starting index`,第二个元素表示每个`sequence`中有多少个`tokens`。
|
||||
|
||||
例如,对于第一个`sequence`,`starting index`为 0,有 11 个`tokens`;对于第二个`sequence`,由于第一个`sequence`转换为`string`后的长度为`89`,因此它的`starting index`为 90,有 15 个`tokens`。
|
||||
|
||||
`json`和`jsonl`类型的文件的`bin`和`meta`文件格式和`txt`一致,此处不再赘叙。
|
||||
|
||||
# pal_inference.py
|
||||
|
||||
在 [GSM8K](https://huggingface.co/datasets/gsm8k) 数据集上使用 [PAL](https://github.com/reasoning-machines/pal) 范式推理,使模型编写代码并通过 Python 解释器执行来解决数学问题。其用法如下:
|
||||
|
||||
```python
|
||||
# 用法:
|
||||
python pal_inference.py <model> <out_dir> [--dataset <dataset>] [--max_length <length>] [--top_p <threshold>] [--eoh <end token>] [--eoa <end token>] [--eos <end token>] [--temperature <temp>] [--time_out <time>] [--verbose, -v] [--append, -a]
|
||||
|
||||
# 参数:
|
||||
# <model> 用于推理的模型的路径。
|
||||
# <out_dir> 生成代码将保存在指定的输出文件夹中。
|
||||
|
||||
# 可选参数:
|
||||
# --dataset <dataset> 用于代码生成的数据集名称(默认:gsm8k)。
|
||||
# --max_length <length> 模型最大输入 token 长度(默认:2048)。
|
||||
# --top_p <threshold> 候选 token 相加的概率阈值(默认:0.8)。
|
||||
# --eoh <end token> 用户输入结束标识符 (默认: "") 。
|
||||
# --eoa <end token> 模型输入结束标识符 (默认: "") 。
|
||||
# --eos <end token> 系统输入结束标识符. (默认: "") 。
|
||||
# --temperature, -t <temp> 生成过程中的采样温度(默认:1.0)。
|
||||
# --time_out <time> 执行生成的代码的最大时间(秒)(默认:100)。
|
||||
# --verbose, -v 打印代码错误信息(可选)。
|
||||
# --append, -a 将输出追加到历史结果中(可选)。
|
||||
```
|
||||
|
||||
以下是使用示例:
|
||||
|
||||
```bash
|
||||
python tools/pal_inference.py internlm/internlm-chat-7k ./output -v
|
||||
```
|
||||
|
||||
其输出文件每一行包括输入的问题,正确答案,执行答案,得分,以及模型生成的 Python 代码块:
|
||||
|
||||
````json
|
||||
{
|
||||
"question": "Janet\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?",
|
||||
"target": 18.0,
|
||||
"answer": 18.0,
|
||||
"score": 1,
|
||||
"generation": ["```python\ndef solution():\n eggs_per_day = 16\n eggs_per_breakfast = 3\n eggs_per_muffin = 4\n eggs_used = eggs_per_day - eggs_per_breakfast - eggs_per_muffin\n eggs_sold = eggs_used\n price_per_egg = 2\n eggs_made = eggs_sold * price_per_egg\n result = eggs_made\n return result\n```"]
|
||||
}
|
||||
````
|
||||
|
||||
InternLM 在 GSM8K 数据集中带工具和不带工具的性能表现:
|
||||
|
||||
| Method | **InternLM-Chat-7B** |
|
||||
| -------- | -------------------- |
|
||||
| w/o tool | 34.5 |
|
||||
| w tool | 39.2 |
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
This directory provide some tools for model training with the following file structure.
|
||||
|
||||
```bash
|
||||
├── transformers # tools for adapting Hugging Face's transformers
|
||||
│ ├── configuration_internlm.py # tools for adapting config
|
||||
|
|
@ -9,6 +10,7 @@ This directory provide some tools for model training with the following file str
|
|||
```
|
||||
|
||||
# tokenizer.py
|
||||
|
||||
We need to use a `tokenizer` to generate `bin` and `meta` files for raw data. We import the tokenizer model by specifying the model weight path in `tools/tokenizer.py`. Currently, we provide `V7.model` to generate tokens. If you want to use a different model, you can modify the model weight path in `tokenizer.py` directly.
|
||||
|
||||
We can run the following command to generate `bin` and `meta` files corresponding to the original data. The parameter `text_input_path` represents the path of the original text data, currently supporting `txt`, `json`, and `jsonl` formats, while `bin_output_path` represents the save path of the generated `bin` files.
|
||||
|
|
@ -19,12 +21,15 @@ $ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_
|
|||
An example of data processing in `txt` format is given here:
|
||||
|
||||
Given a file `raw_data.txt` containg raw data with the following content.
|
||||
|
||||
```bash
|
||||
Appreciate every detail in life to truly taste the flavor of happiness.
|
||||
Dreams are the source of life’s motivation. Pursue them diligently to achieve your goals.
|
||||
Learn to be tolerant and understanding to establish truly harmonious interpersonal relationships.
|
||||
```
|
||||
|
||||
Next, we can run the following command to generate `bin` and `meta` files for raw data.
|
||||
|
||||
```bash
|
||||
$ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_path your_output_bin_path
|
||||
```
|
||||
|
|
@ -32,19 +37,73 @@ $ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_
|
|||
It should be noted that the generated `bin` files should be placed in one of the following directories to clarify the data type: `cn`(Chinese), `en`(English), `code`(code data), `ja`(Japanese), `ar`(Arabic) and `kaoshi`(kaoshi data).
|
||||
|
||||
The format of generated `bin` file is as follows.
|
||||
|
||||
```python
|
||||
{"tokens": [98655, 2317, 2922, 6649, 1595, 7856, 435, 2424, 442, 9556, 12807, 410, 17313, 446, 23331, 95746]}
|
||||
{"tokens": [98655, 302, 1383, 269, 657, 410, 2687, 446, 2424, 98667, 269, 25220, 281, 523, 1874, 492, 1248, 38127, 4563, 442, 11227, 829, 8980, 95746]}
|
||||
{"tokens": [98655, 24190, 442, 517, 15013, 649, 454, 8793, 442, 5849, 9556, 17917, 1369, 1084, 29890, 12021, 95746]}
|
||||
```
|
||||
|
||||
In the generated `bin` file, each line (`sequence`) corresponds to the `tokens` for each sentence in the raw data.
|
||||
|
||||
The format of generated `meta` file in as follows.
|
||||
|
||||
```bash
|
||||
(0, 16), (110, 24), (262, 17)
|
||||
```
|
||||
|
||||
Each tuple in the `meta` file represents the meta information of each `sequence` where the first element in the tuple indicates the `starting index` of each `sequence` among all `sequences` and the second element indicates the amount of `tokens` for each `sequence`.
|
||||
|
||||
For example, the `starting index` is 0 for the first `sequence` with 16 `tokens`. Since the length of `sequence` in `string` format is 109, the `starting index` is 110. And the number of `tokens` of the sencond `sequence` is 24.
|
||||
|
||||
The `bin` and `meta` file formats for `json` and `jsonl` type files are the same as for `txt`, so we won't go over them here.
|
||||
|
||||
# pal_inference.py
|
||||
|
||||
Perform reasoning using [PAL](https://github.com/reasoning-machines/pal) on the [GSM8K](https://huggingface.co/datasets/gsm8k) dataset, allowing the model to generate code and solve mathematical problems through Python interpretation. Here's how you can use it:
|
||||
|
||||
```bash
|
||||
# Usage:
|
||||
python pal_inference.py <model> <out_dir> [--dataset <dataset>] [--max_length <length>] [--top_p <threshold>] [--eoh <end token>] [--eoa <end token>] [--eos <end token>] [--temperature <temp>] [--time_out <time>] [--verbose, -v] [--append, -a]
|
||||
|
||||
# Parameters:
|
||||
# <model> Path to the model used for inference.
|
||||
# <out_dir> Generated code will be saved in the specified output folder.
|
||||
|
||||
# Optional arguments:
|
||||
# --dataset <dataset> Dataset name used for code generation (default: gsm8k).
|
||||
# --max_length <length> Model's maximum input token length (default: 2048).
|
||||
# --top_p <threshold> Probability threshold for candidate tokens (default: 0.8).
|
||||
# --eoh <end token> End of human (user) token. (default: "").
|
||||
# --eoa <end token> End of assistant (bot) token. (default: "").
|
||||
# --eos <end token> End of system token. (default: "").
|
||||
# --temperature, -t <temp> Sampling temperature during generation (default: 1.0).
|
||||
# --time_out <time> Maximum time (in seconds) for executing the generated code (default: 100).
|
||||
# --verbose, -v Print code error messages (optional).
|
||||
# --append, -a ppend the output to historical results (optional).
|
||||
```
|
||||
|
||||
Below is an example of usage:
|
||||
|
||||
```bash
|
||||
python tools/pal_inference.py internlm/internlm-chat-7k ./output -v
|
||||
```
|
||||
|
||||
The output file contains each line with the input question, the correct answer, the executed answer, the score, and the Python code block generated by the model:
|
||||
|
||||
````json
|
||||
{
|
||||
"question": "Janet\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?",
|
||||
"target": 18.0,
|
||||
"answer": 18.0,
|
||||
"score": 1,
|
||||
"generation": ["```python\ndef solution():\n eggs_per_day = 16\n eggs_per_breakfast = 3\n eggs_per_muffin = 4\n eggs_used = eggs_per_day - eggs_per_breakfast - eggs_per_muffin\n eggs_sold = eggs_used\n price_per_egg = 2\n eggs_made = eggs_sold * price_per_egg\n result = eggs_made\n return result\n```"]
|
||||
}
|
||||
````
|
||||
|
||||
InternLM performance in the GSM8K dataset with and without tools:
|
||||
|
||||
| Method | **InternLM-Chat-7B** |
|
||||
| -------- | -------------------- |
|
||||
| w/o tool | 34.5 |
|
||||
| w tool | 39.2 |
|
||||
|
|
|
|||
|
|
@ -0,0 +1,320 @@
|
|||
# This file is modified from:
|
||||
# hhttps://github.com/reasoning-machines/pal/blob/main/pal/core/interface.py
|
||||
#
|
||||
# Copyright 2022 PAL Authors. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import copy
|
||||
import json
|
||||
import os
|
||||
from dataclasses import asdict
|
||||
from typing import Any, Dict, List
|
||||
|
||||
import torch
|
||||
import tqdm
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
from tools.transformers.interface import GenerationConfig, generate_interactive
|
||||
from internlm.utils.timeout import Timeout
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(description="PAL Inference")
|
||||
parser.add_argument("model", type=str, help="Path to the pre-trained LLM used for inference.")
|
||||
parser.add_argument(
|
||||
"out_dir", type=str, help="Name of the output folder where generated code snippets will be saved."
|
||||
)
|
||||
parser.add_argument("--dataset", default="gsm8k", type=str, help="Name of the dataset used for code generation.")
|
||||
parser.add_argument(
|
||||
"--max_length",
|
||||
default=2048,
|
||||
type=int,
|
||||
help="Maximum input token length for the natural language description.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--top_p",
|
||||
default=0.8,
|
||||
type=float,
|
||||
help="Probability threshold to choose sample tokens during generation.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--eoh",
|
||||
default="",
|
||||
type=str,
|
||||
help="End of human (user) token.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--eoa",
|
||||
default="",
|
||||
type=str,
|
||||
help="End of assistant (bot) token.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--eos",
|
||||
default="",
|
||||
type=str,
|
||||
help="End of system token.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--temperature", "-t", default=1.0, type=float, help="Temperature of token sampling during generation."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--time_out", default=100, type=float, help="Maximum time allowed for executing generated code."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--verbose",
|
||||
"-v",
|
||||
action="store_true",
|
||||
help="Print code error information when executing generated code (optional).",
|
||||
)
|
||||
parser.add_argument("--append", "-a", action="store_true", help="Append output to the history results (optional).")
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
class GenericRuntime:
    """Sandboxed namespace for running model-generated code.

    Adapted from https://github.com/reasoning-machines/pal

    NOTE: this executes arbitrary model output via ``exec``/``eval`` in the
    current process — do not feed it untrusted input outside an isolated
    environment.
    """

    GLOBAL_DICT: dict = {}
    LOCAL_DICT = None
    HEADERS: List = []

    def __init__(self):
        # Work on copies so the class-level defaults are never mutated.
        self._global_vars = copy.copy(self.GLOBAL_DICT)
        self._local_vars = copy.copy(self.LOCAL_DICT) if self.LOCAL_DICT else None
        # Pre-execute any header snippets into the namespace.
        for header_code in self.HEADERS:
            self.exec_code(header_code)

    def exec_code(self, code_piece: str) -> None:
        """Execute *code_piece* inside the runtime's global namespace."""
        exec(code_piece, self._global_vars)

    def eval_code(self, expr: str) -> Any:
        """Evaluate *expr* against the runtime's global namespace and return the result."""
        return eval(expr, self._global_vars)

    def inject(self, var_dict: Dict[str, Any]) -> None:
        """Copy every entry of *var_dict* into the runtime's global namespace."""
        self._global_vars.update(var_dict)

    @property
    def answer(self):
        """Value bound to the name ``answer`` by previously executed code."""
        return self._global_vars["answer"]
|
||||
|
||||
|
||||
class PALInterface:
    """PAL interface wrapping :func:`generate_interactive` to extract and execute
    generated code.

    Adapted from https://github.com/reasoning-machines/pal

    Args:
        model (AutoModelForCausalLM): Causal LM used to generate solutions.
        tokenizer (AutoTokenizer): Tokenizer matching ``model``.
        generation_config (GenerationConfig): Decode strategies.
        additional_eos_token_id (int): End of sentence token id, default: 103028
        get_answer_expr (str): The function name of generated code, default: "solution()"
        verbose (bool): Print error information
    """

    def __init__(
        self,
        model: AutoModelForCausalLM,
        tokenizer: AutoTokenizer,
        generation_config: GenerationConfig,
        additional_eos_token_id: int = 103028,
        get_answer_expr: str = "solution()",
        verbose: bool = False,
    ):
        self.runtime = GenericRuntime()
        self.history: List = []
        self.model = model
        self.tokenizer = tokenizer
        self.generation_config = generation_config
        self.additional_eos_token_id = additional_eos_token_id
        self.answer_expr = get_answer_expr
        self.verbose = verbose

    def generate(self, prompt):
        """Run the model on *prompt* and return the extracted code as a list of lines."""
        # The api will generate response word by word;
        # we only need the last generation as the final result.
        for cur_gen in generate_interactive(
            model=self.model,
            tokenizer=self.tokenizer,
            prompt=prompt,
            additional_eos_token_id=self.additional_eos_token_id,
            **asdict(self.generation_config),
        ):
            continue
        # Get final response
        self.history.append(cur_gen)
        # Extract code block
        code = self.process_generation_to_code(cur_gen)
        return code

    def process_generation_to_code(self, gens: str):
        """Strip a markdown code fence from *gens* and split the content into lines."""
        if "```python" in gens:
            gens = gens.split("```python")[1].split("```")[0]
        elif "```" in gens:
            gens = gens.split("```")[1].split("```")[0]
        code = gens.split("\n")
        return code

    def run(self, prompt, time_out: float = 100):
        """Generate code for *prompt*, execute it, and return the evaluated answer.

        Returns an empty string when execution fails in verbose mode.
        """
        code = self.generate(prompt)
        # Fix: pre-assign exec_result so a swallowed execution failure does not
        # raise UnboundLocalError at the return statement below.
        exec_result = ""
        with Timeout(time_out):
            try:
                exec_result = self.execute(code)
            except Exception as e:
                if self.verbose:
                    print(e)
        return exec_result

    def execute(self, code: List[str]):
        """Execute *code* in the sandboxed runtime and evaluate ``self.answer_expr``."""
        self.runtime.exec_code("\n".join(code))
        return self.runtime.eval_code(self.answer_expr)

    def clear_history(self):
        """Drop all stored generations."""
        self.history = []
|
||||
|
||||
|
||||
def load_model(args):
    """Load the causal LM (bfloat16, moved to GPU) and its tokenizer from ``args.model``."""
    tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
    model = (
        AutoModelForCausalLM.from_pretrained(args.model, trust_remote_code=True)
        .to(torch.bfloat16)
        .cuda()
    )
    return model, tokenizer
|
||||
|
||||
|
||||
def load_data(args):
    """Load the evaluation split of the configured dataset.

    Returns a list of ``{"question": str, "target": float}`` records, where the
    numeric target is parsed from the tail of the ``#``-delimited answer field.

    Raises:
        NotImplementedError: for any dataset other than ``gsm8k``.
    """
    # Load data from huggingface dataset; only GSM8K is supported.
    if args.dataset != "gsm8k":
        raise NotImplementedError
    test_split = load_dataset(path=args.dataset, name="main")["test"]
    return [
        {
            "question": sample["question"],
            "target": float(sample["answer"].split("#")[-1].replace(",", "")),
        }
        for sample in test_split
    ]
|
||||
|
||||
|
||||
PROMPT = """<|System|>:You are a helpful assistant which use tools to solve mathematical reasoning questions. The tools you can use are:
|
||||
PythonExecutor: It can execute Python code. The code must be a function, and the function name must be 'solution'. The example format is as follows:
|
||||
```python
|
||||
def solution():
|
||||
variable_names_with_real_meaning = func(variable)
|
||||
return variable_names_with_real_meaning
|
||||
```{eos}
|
||||
<|User|>:Olivia has $23. She bought five bagels for $3 each. How much money does she have left?{eoh}
|
||||
<|Bot|>:
|
||||
```python
|
||||
def solution():
|
||||
money_initial = 23
|
||||
bagels = 5
|
||||
bagel_cost = 3
|
||||
money_spent = bagels * bagel_cost
|
||||
money_left = money_initial - money_spent
|
||||
result = money_left
|
||||
return result
|
||||
```{eoa}
|
||||
<|User|>:Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?{eoh}
|
||||
<|Bot|>:
|
||||
```python
|
||||
def solution():
|
||||
golf_balls_initial = 58
|
||||
golf_balls_lost_tuesday = 23
|
||||
golf_balls_lost_wednesday = 2
|
||||
golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday
|
||||
result = golf_balls_left
|
||||
return result
|
||||
```{eoa}
|
||||
<|User|>:There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?{eoh}
|
||||
<|Bot|>:
|
||||
```python
|
||||
def solution():
|
||||
computers_initial = 9
|
||||
computers_per_day = 5
|
||||
num_days = 4 # 4 days between monday and thursday
|
||||
computers_added = computers_per_day * num_days
|
||||
computers_total = computers_initial + computers_added
|
||||
result = computers_total
|
||||
return result
|
||||
```{eoa}
|
||||
<|System|>:How about this question?{eos}
|
||||
<|User|>:{question}{eoh}
|
||||
<|Bot|>:""".strip()
|
||||
|
||||
|
||||
def main():
    """Entry point: evaluate PAL-style program-aided reasoning on a dataset.

    Loads the model and dataset selected by the CLI arguments, generates and
    executes a Python ``solution()`` for each question, and streams JSON-lines
    results (answer, score, raw generations) to ``<out_dir>/<dataset>.json``.
    """
    args = parse_args()

    print("load model begin.")
    model, tokenizer = load_model(args)
    print("load model end.")

    generation_config = GenerationConfig(max_length=args.max_length, top_p=args.top_p, temperature=args.temperature)

    verbose = args.verbose
    interface = PALInterface(model=model, tokenizer=tokenizer, generation_config=generation_config, verbose=verbose)

    # exist_ok=True avoids the exists()/makedirs race of the original check.
    os.makedirs(args.out_dir, exist_ok=True)
    savepath = os.path.join(args.out_dir, args.dataset + ".json")

    # Load from history results so --append resumes where the last run stopped.
    if args.append and os.path.exists(savepath):
        # Fix: close the history file instead of leaking the handle.
        with open(savepath) as history_file:
            lines = history_file.readlines()
        num_skip_exps = len(lines)
        scores = [x["score"] for x in map(json.loads, lines)]
    else:
        num_skip_exps = 0
        scores = []

    examples = load_data(args)
    with open(savepath, "a" if args.append else "w") as f:
        pbar = tqdm.tqdm(examples[num_skip_exps:], initial=num_skip_exps, total=len(examples))
        for x in pbar:
            question = x["question"]
            result = copy.copy(x)

            try:
                answer = interface.run(
                    prompt=PROMPT.format(question=question, eoh=args.eoh, eoa=args.eoa, eos=args.eos),
                    time_out=args.time_out,
                )
                answer = float(answer)
                # A prediction is correct when it matches the target within a
                # small absolute tolerance.
                score = 1 if abs(answer - x["target"]) < 1e-3 else 0
            except Exception as e:  # generation or code execution may fail arbitrarily
                if verbose:
                    print(e)
                answer = ""
                score = 0
            scores.append(score)
            result["answer"] = answer
            result["score"] = score
            result["generation"] = interface.history
            f.write(json.dumps(result) + "\n")

            interface.clear_history()
            # Flush per example so partial progress survives interruption.
            f.flush()

    # Guard against ZeroDivisionError when there was nothing to evaluate.
    if scores:
        print(f"{args.model}: Accuracy - {sum(scores) / len(scores)}")
    torch.cuda.empty_cache()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,137 @@
|
|||
import copy
|
||||
import warnings
|
||||
from dataclasses import dataclass
|
||||
from typing import Callable, List, Optional
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList
|
||||
from transformers.utils import logging
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
@dataclass
class GenerationConfig:
    """Decoding options forwarded (via ``asdict``) as keyword overrides to the
    HF generation config inside ``generate_interactive``."""

    # Maximum total sequence length (prompt + generated tokens); None defers
    # to the model's own generation config.
    max_length: Optional[int] = None
    # Nucleus-sampling probability mass threshold.
    top_p: Optional[float] = None
    # Softmax temperature applied to next-token scores.
    temperature: Optional[float] = None
    # Sample from the distribution when True; greedy argmax otherwise.
    do_sample: Optional[bool] = True
    # Repetition penalty forwarded to the HF config (1.0 disables it per HF docs).
    repetition_penalty: Optional[float] = 1.0
|
||||
|
||||
|
||||
@torch.inference_mode()
def generate_interactive(
    model,
    tokenizer,
    prompt,
    generation_config: Optional[GenerationConfig] = None,
    logits_processor: Optional[LogitsProcessorList] = None,
    stopping_criteria: Optional[StoppingCriteriaList] = None,
    prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
    additional_eos_token_id: Optional[int] = None,
    **kwargs,
):
    """Stream token-by-token responses from *model* for a single *prompt*.

    Yields the decoded text generated so far after every new token, so callers
    can render incremental output; iterating to exhaustion yields the final
    response last.

    Args:
        model: HF causal LM exposing ``generation_config`` and the private
            ``_get_logits_processor`` / ``_get_stopping_criteria`` /
            ``_get_logits_warper`` helpers.
        tokenizer: tokenizer used to encode the prompt and decode the output.
        prompt (str): full chat-formatted prompt.
        generation_config: decoding options; falls back to ``model.generation_config``.
        logits_processor / stopping_criteria / prefix_allowed_tokens_fn:
            optional HF generation hooks.
        additional_eos_token_id: extra token id treated as end-of-sequence.
        **kwargs: extra fields merged into the generation config via ``update``.

    NOTE(review): inputs are moved with ``.cuda()``, so a CUDA device is
    required. Only ``input_ids[0]`` is decoded below, so a batch size of 1 is
    effectively assumed — confirm before batching.
    """
    inputs = tokenizer([prompt], padding=True, return_tensors="pt")
    input_length = len(inputs["input_ids"][0])
    for k, v in inputs.items():
        inputs[k] = v.cuda()
    input_ids = inputs["input_ids"]
    # batch_size is currently unused; only the sequence length matters below.
    batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
    if generation_config is None:
        generation_config = model.generation_config
    # Deep-copy so the per-call update() never mutates the model's shared config.
    generation_config = copy.deepcopy(generation_config)
    model_kwargs = generation_config.update(**kwargs)
    bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
    # Normalize eos ids to a list and append the caller-provided extra id.
    if isinstance(eos_token_id, int):
        eos_token_id = [eos_token_id]
    if additional_eos_token_id is not None:
        eos_token_id.append(additional_eos_token_id)
    # Mirror HF's max_length / max_new_tokens precedence and deprecation warnings.
    has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
    if has_default_max_length and generation_config.max_new_tokens is None:
        warnings.warn(
            f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
            "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
            " recommend using `max_new_tokens` to control the maximum length of the generation.",
            UserWarning,
        )
    elif generation_config.max_new_tokens is not None:
        generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
        if not has_default_max_length:
            # NOTE(review): logger.warn is deprecated in favour of logger.warning.
            logger.warn(
                f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
                f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                "Please refer to the documentation for more information. "
                "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
                UserWarning,
            )

    if input_ids_seq_length >= generation_config.max_length:
        input_ids_string = "input_ids"
        logger.warning(
            f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
            f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
            " increasing `max_new_tokens`."
        )

    # 2. Set generation parameters if not already defined
    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

    logits_processor = model._get_logits_processor(
        generation_config=generation_config,
        input_ids_seq_length=input_ids_seq_length,
        encoder_input_ids=input_ids,
        prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
        logits_processor=logits_processor,
    )

    stopping_criteria = model._get_stopping_criteria(
        generation_config=generation_config, stopping_criteria=stopping_criteria
    )
    logits_warper = model._get_logits_warper(generation_config)

    # 1 marks a sequence as still generating; flips to 0 once an EOS appears.
    unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
    scores = None
    while True:
        model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)
        # forward pass to get next token
        outputs = model(
            **model_inputs,
            return_dict=True,
            output_attentions=False,
            output_hidden_states=False,
        )

        next_token_logits = outputs.logits[:, -1, :]

        # pre-process distribution
        next_token_scores = logits_processor(input_ids, next_token_logits)
        next_token_scores = logits_warper(input_ids, next_token_scores)

        # sample
        probs = nn.functional.softmax(next_token_scores, dim=-1)
        if generation_config.do_sample:
            next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
        else:
            next_tokens = torch.argmax(probs, dim=-1)

        # update generated ids, model inputs, and length for next step
        input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
        model_kwargs = model._update_model_kwargs_for_generation(
            outputs, model_kwargs, is_encoder_decoder=False
        )
        # NOTE(review): min() over the per-id comparison tensors relies on
        # batch size 1; for larger batches tensor ordering is ambiguous — confirm.
        unfinished_sequences = unfinished_sequences.mul((min(next_tokens != i for i in eos_token_id)).long())

        # Decode only the newly generated tail of the first sequence, trimming
        # one trailing EOS token per eos id before yielding the partial response.
        output_token_ids = input_ids[0].cpu().tolist()
        output_token_ids = output_token_ids[input_length:]
        for each_eos_token_id in eos_token_id:
            if output_token_ids[-1] == each_eos_token_id:
                output_token_ids = output_token_ids[:-1]
        response = tokenizer.decode(output_token_ids)

        yield response
        # stop when each sentence is finished, or if we exceed the maximum length
        if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
            break
|
||||
41
train.py
41
train.py
|
|
@ -7,6 +7,7 @@ import traceback
|
|||
from functools import partial
|
||||
from typing import Iterable
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from torch import nn
|
||||
|
|
@ -61,6 +62,10 @@ from internlm.utils.parallel import (
|
|||
sync_model_param_within_tp,
|
||||
)
|
||||
from internlm.utils.registry import MODEL_INITIALIZER
|
||||
from internlm.utils.simple_memory_profiler import (
|
||||
SimpleMemoryProfiler,
|
||||
build_activation_config,
|
||||
)
|
||||
from internlm.utils.writer import Writer
|
||||
|
||||
# global llm logger
|
||||
|
|
@ -555,7 +560,12 @@ def main(args):
|
|||
scheduler_hooks = [
|
||||
SchedulerMetricHook(
|
||||
metric=metric,
|
||||
skip=gpc.is_using_pp() and gpc.config.parallel["pipeline"].get("interleaved_overlap", False),
|
||||
skip=(
|
||||
gpc.is_using_pp()
|
||||
and hasattr(gpc.config.model, "num_chunks")
|
||||
and gpc.config.model.num_chunks > 1
|
||||
and gpc.config.parallel["pipeline"].get("interleaved_overlap", False)
|
||||
),
|
||||
),
|
||||
]
|
||||
|
||||
|
|
@ -569,6 +579,19 @@ def main(args):
|
|||
scheduler_hooks=scheduler_hooks,
|
||||
)
|
||||
|
||||
# initialize simple memory profiler
|
||||
if args.profiling:
|
||||
memory_profiler = SimpleMemoryProfiler(
|
||||
model.model,
|
||||
optimizer.optim,
|
||||
log_folder=f"memory_trace/rank{gpc.get_global_rank()}_"
|
||||
+ f"dp{gpc.get_local_rank(ParallelMode.DATA)}_"
|
||||
+ f"tp{gpc.get_local_rank(ParallelMode.TENSOR)}",
|
||||
activation_config=build_activation_config(gpc.config.model.num_layers),
|
||||
)
|
||||
else:
|
||||
memory_profiler = None
|
||||
|
||||
# initialize the batch skipper
|
||||
batch_skipper = BatchSkipper(skip_batches)
|
||||
|
||||
|
|
@ -622,13 +645,13 @@ def main(args):
|
|||
trainer_result = trainer.step()
|
||||
assert trainer_result is not None
|
||||
|
||||
success_update, grad_norm = trainer_result
|
||||
success_update, grad_norm_groups = trainer_result
|
||||
if success_update: # update parameters successfully
|
||||
train_state.step_count += 1
|
||||
else:
|
||||
train_state.inf_nan_skip_batches += 1 # record the amount of updating parameters unsuccessfully.
|
||||
if grad_norm == -99.0 and gpc.is_rank_for_log(): # -99.0 encodes a specific failure case
|
||||
logger.warning(f"Warning: skip parameter update at step {batch_count}.") # pylint: disable=W1203
|
||||
if -99.0 in grad_norm_groups and gpc.is_rank_for_log(): # -99.0 encodes a specific failure case
|
||||
logger.warning(f"Warning: skip parameter update at step {batch_count}.")
|
||||
send_alert_message(
|
||||
address=gpc.config.alert_address, message=f"Warning: skip parameter update at step {batch_count}."
|
||||
)
|
||||
|
|
@ -648,7 +671,7 @@ def main(args):
|
|||
start_time=start_time,
|
||||
loss=loss,
|
||||
moe_loss=moe_loss,
|
||||
grad_norm=grad_norm,
|
||||
grad_norm=np.array(grad_norm_groups),
|
||||
metric=metric,
|
||||
update_panel=uniscale_logger is not None,
|
||||
)
|
||||
|
|
@ -667,6 +690,9 @@ def main(args):
|
|||
update_panel=uniscale_logger is not None,
|
||||
)
|
||||
|
||||
if memory_profiler is not None:
|
||||
memory_profiler.step()
|
||||
|
||||
# checkpoint the training states in specific steps, which is determined by the args "checkpoint_every"
|
||||
# # save batch sampler that tracks the true consumed samples
|
||||
ckpt_save_manager.try_save_checkpoint(train_state)
|
||||
|
|
@ -687,8 +713,7 @@ if __name__ == "__main__":
|
|||
try:
|
||||
main(args)
|
||||
except Exception:
|
||||
logger.error( # pylint: disable=W1203
|
||||
f"Raise exception from {hostname} with rank id: {gpc.get_global_rank()}",
|
||||
exc_info=traceback.format_exc(),
|
||||
logger.error(
|
||||
f"Raise exception from {hostname} with rank id: {gpc.get_global_rank()}\n{traceback.format_exc()}",
|
||||
)
|
||||
mm.monitor_exception(alert_address=gpc.config.alert_address, excp_info=traceback.format_exc())
|
||||
|
|
|
|||
177
web_demo.py
177
web_demo.py
|
|
@ -1,158 +1,35 @@
|
|||
"""
|
||||
This script refers to the dialogue example of streamlit, the interactive generation code of chatglm2 and transformers. We mainly modified part of the code logic to adapt to the generation of our model.
|
||||
This script refers to the dialogue example of streamlit, the interactive generation code of chatglm2 and transformers.
|
||||
We mainly modified part of the code logic to adapt to the generation of our model.
|
||||
Please refer to these links below for more information:
|
||||
1. streamlit chat example: https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
|
||||
2. chatglm2: https://github.com/THUDM/ChatGLM2-6B
|
||||
3. transformers: https://github.com/huggingface/transformers
|
||||
"""
|
||||
|
||||
from dataclasses import asdict
|
||||
|
||||
import streamlit as st
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from dataclasses import dataclass, asdict
|
||||
from typing import List, Optional, Callable, Optional
|
||||
import copy
|
||||
import warnings
|
||||
import logging
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
from transformers.utils import logging
|
||||
from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig
|
||||
|
||||
from tools.transformers.interface import GenerationConfig, generate_interactive
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
@torch.inference_mode()
|
||||
def generate_interactive(
|
||||
model,
|
||||
tokenizer,
|
||||
prompt,
|
||||
generation_config: Optional[GenerationConfig] = None,
|
||||
logits_processor: Optional[LogitsProcessorList] = None,
|
||||
stopping_criteria: Optional[StoppingCriteriaList] = None,
|
||||
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
|
||||
additional_eos_token_id: Optional[int] = None,
|
||||
**kwargs,
|
||||
):
|
||||
inputs = tokenizer([prompt], padding=True, return_tensors="pt")
|
||||
input_length = len(inputs["input_ids"][0])
|
||||
for k, v in inputs.items():
|
||||
inputs[k] = v.cuda()
|
||||
input_ids = inputs["input_ids"]
|
||||
batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
|
||||
if generation_config is None:
|
||||
generation_config = model.generation_config
|
||||
generation_config = copy.deepcopy(generation_config)
|
||||
model_kwargs = generation_config.update(**kwargs)
|
||||
bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
|
||||
if isinstance(eos_token_id, int):
|
||||
eos_token_id = [eos_token_id]
|
||||
if additional_eos_token_id is not None:
|
||||
eos_token_id.append(additional_eos_token_id)
|
||||
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
|
||||
if has_default_max_length and generation_config.max_new_tokens is None:
|
||||
warnings.warn(
|
||||
f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
|
||||
"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
|
||||
" recommend using `max_new_tokens` to control the maximum length of the generation.",
|
||||
UserWarning,
|
||||
)
|
||||
elif generation_config.max_new_tokens is not None:
|
||||
generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
|
||||
if not has_default_max_length:
|
||||
logger.warn(
|
||||
f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
|
||||
f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
|
||||
"Please refer to the documentation for more information. "
|
||||
"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
|
||||
UserWarning,
|
||||
)
|
||||
|
||||
if input_ids_seq_length >= generation_config.max_length:
|
||||
input_ids_string = "input_ids"
|
||||
logger.warning(
|
||||
f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
|
||||
f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
|
||||
" increasing `max_new_tokens`."
|
||||
)
|
||||
|
||||
# 2. Set generation parameters if not already defined
|
||||
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
|
||||
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
|
||||
|
||||
logits_processor = model._get_logits_processor(
|
||||
generation_config=generation_config,
|
||||
input_ids_seq_length=input_ids_seq_length,
|
||||
encoder_input_ids=input_ids,
|
||||
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
|
||||
logits_processor=logits_processor,
|
||||
)
|
||||
|
||||
stopping_criteria = model._get_stopping_criteria(
|
||||
generation_config=generation_config, stopping_criteria=stopping_criteria
|
||||
)
|
||||
logits_warper = model._get_logits_warper(generation_config)
|
||||
|
||||
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
|
||||
scores = None
|
||||
while True:
|
||||
model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)
|
||||
# forward pass to get next token
|
||||
outputs = model(
|
||||
**model_inputs,
|
||||
return_dict=True,
|
||||
output_attentions=False,
|
||||
output_hidden_states=False,
|
||||
)
|
||||
|
||||
next_token_logits = outputs.logits[:, -1, :]
|
||||
|
||||
# pre-process distribution
|
||||
next_token_scores = logits_processor(input_ids, next_token_logits)
|
||||
next_token_scores = logits_warper(input_ids, next_token_scores)
|
||||
|
||||
# sample
|
||||
probs = nn.functional.softmax(next_token_scores, dim=-1)
|
||||
if generation_config.do_sample:
|
||||
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
|
||||
else:
|
||||
next_tokens = torch.argmax(probs, dim=-1)
|
||||
|
||||
# update generated ids, model inputs, and length for next step
|
||||
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
|
||||
model_kwargs = model._update_model_kwargs_for_generation(
|
||||
outputs, model_kwargs, is_encoder_decoder=False
|
||||
)
|
||||
unfinished_sequences = unfinished_sequences.mul((min(next_tokens != i for i in eos_token_id)).long())
|
||||
|
||||
output_token_ids = input_ids[0].cpu().tolist()
|
||||
output_token_ids = output_token_ids[input_length:]
|
||||
for each_eos_token_id in eos_token_id:
|
||||
if output_token_ids[-1] == each_eos_token_id:
|
||||
output_token_ids = output_token_ids[:-1]
|
||||
response = tokenizer.decode(output_token_ids)
|
||||
|
||||
yield response
|
||||
# stop when each sentence is finished, or if we exceed the maximum length
|
||||
if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
|
||||
break
|
||||
|
||||
|
||||
def on_btn_click():
    # Clear the chat: deleting the key resets the conversation stored in
    # Streamlit session state.
    del st.session_state.messages
|
||||
|
||||
|
||||
@dataclass
class GenerationConfig:
    """Decoding options forwarded (via ``asdict``) as keyword overrides to the
    HF generation config inside ``generate_interactive``."""

    # Maximum total sequence length (prompt + generated tokens); None defers
    # to the model's own generation config.
    max_length: Optional[int] = None
    # Nucleus-sampling probability mass threshold.
    top_p: Optional[float] = None
    # Softmax temperature applied to next-token scores.
    temperature: Optional[float] = None
    # Sample from the distribution when True; greedy argmax otherwise.
    do_sample: Optional[bool] = True
    # Repetition penalty forwarded to the HF config (1.0 disables it per HF docs).
    repetition_penalty: Optional[float] = 1.0
|
||||
|
||||
|
||||
@st.cache_resource
|
||||
def load_model():
|
||||
model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda()
|
||||
model = (
|
||||
AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
|
||||
.to(torch.bfloat16)
|
||||
.cuda()
|
||||
)
|
||||
tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
|
||||
return model, tokenizer
|
||||
|
||||
|
|
@ -160,19 +37,11 @@ def load_model():
|
|||
def prepare_generation_config():
|
||||
with st.sidebar:
|
||||
max_length = st.slider("Max Length", min_value=32, max_value=2048, value=2048)
|
||||
top_p = st.slider(
|
||||
'Top P', 0.0, 1.0, 0.8, step=0.01
|
||||
)
|
||||
temperature = st.slider(
|
||||
'Temperature', 0.0, 1.0, 0.7, step=0.01
|
||||
)
|
||||
top_p = st.slider("Top P", 0.0, 1.0, 0.8, step=0.01)
|
||||
temperature = st.slider("Temperature", 0.0, 1.0, 0.7, step=0.01)
|
||||
st.button("Clear Chat History", on_click=on_btn_click)
|
||||
|
||||
generation_config = GenerationConfig(
|
||||
max_length=max_length,
|
||||
top_p=top_p,
|
||||
temperature=temperature
|
||||
)
|
||||
generation_config = GenerationConfig(max_length=max_length, top_p=top_p, temperature=temperature)
|
||||
|
||||
return generation_config
|
||||
|
||||
|
|
@ -199,7 +68,7 @@ def combine_history(prompt):
|
|||
|
||||
|
||||
def main():
|
||||
#torch.cuda.empty_cache()
|
||||
# torch.cuda.empty_cache()
|
||||
print("load model begin.")
|
||||
model, tokenizer = load_model()
|
||||
print("load model end.")
|
||||
|
|
@ -231,7 +100,13 @@ def main():
|
|||
|
||||
with st.chat_message("robot", avatar=robot_avator):
|
||||
message_placeholder = st.empty()
|
||||
for cur_response in generate_interactive(model=model, tokenizer=tokenizer, prompt=real_prompt, additional_eos_token_id=103028, **asdict(generation_config)):
|
||||
for cur_response in generate_interactive(
|
||||
model=model,
|
||||
tokenizer=tokenizer,
|
||||
prompt=real_prompt,
|
||||
additional_eos_token_id=103028,
|
||||
**asdict(generation_config),
|
||||
):
|
||||
# Display robot response in chat message container
|
||||
message_placeholder.markdown(cur_response + "▌")
|
||||
message_placeholder.markdown(cur_response)
|
||||
|
|
@ -242,11 +117,3 @@ def main():
|
|||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue