mirror of https://github.com/hpcaitech/ColossalAI
[Fix] resolve conflicts of merging main
commit
ed5ebd1735
|
@ -1,2 +1 @@
|
|||
2.0.0-11.7.0
|
||||
2.1.0-11.8.0
|
||||
2.1.0-12.1.0
|
||||
|
|
|
@ -1,16 +1,16 @@
|
|||
{
|
||||
"build": [
|
||||
{
|
||||
"torch_command": "pip install torch==1.12.1+cu102 torchvision==0.13.1+cu102 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu102",
|
||||
"cuda_image": "hpcaitech/cuda-conda:10.2"
|
||||
"torch_command": "pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu121",
|
||||
"cuda_image": "hpcaitech/cuda-conda:12.1"
|
||||
},
|
||||
{
|
||||
"torch_command": "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113",
|
||||
"cuda_image": "hpcaitech/cuda-conda:11.3"
|
||||
"torch_command": "pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu118",
|
||||
"cuda_image": "hpcaitech/cuda-conda:11.8"
|
||||
},
|
||||
{
|
||||
"torch_command": "pip install torch==1.12.1+cu116 torchvision==0.13.1+cu116 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu116",
|
||||
"cuda_image": "hpcaitech/cuda-conda:11.6"
|
||||
"torch_command": "pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1",
|
||||
"cuda_image": "hpcaitech/cuda-conda:11.7"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
- [ ] I have created an issue for this PR for traceability
|
||||
- [ ] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
|
||||
- [ ] I have added relevant tags if possible for us to better distinguish different PRs
|
||||
- [ ] I have installed pre-commit: `pip install pre-commit && pre-commit install`
|
||||
|
||||
|
||||
## 🚨 Issue number
|
||||
|
|
|
@ -117,7 +117,7 @@ jobs:
|
|||
cd TensorNVMe
|
||||
conda install cmake
|
||||
pip install -r requirements.txt
|
||||
pip install -v .
|
||||
DISABLE_URING=1 pip install -v .
|
||||
|
||||
- name: Store TensorNVMe Cache
|
||||
run: |
|
||||
|
|
|
@ -44,7 +44,7 @@ jobs:
|
|||
cd TensorNVMe
|
||||
conda install cmake
|
||||
pip install -r requirements.txt
|
||||
pip install -v .
|
||||
DISABLE_URING=1 pip install -v .
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
if: steps.check-avai.outputs.avai == 'true'
|
||||
|
@ -67,7 +67,6 @@ jobs:
|
|||
--durations=0 \
|
||||
tests/
|
||||
env:
|
||||
NCCL_SHM_DISABLE: 1
|
||||
LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
|
||||
LLAMA_PATH: /data/scratch/llama-tiny
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@ jobs:
|
|||
matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}
|
||||
container:
|
||||
image: ${{ matrix.container }}
|
||||
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
|
||||
options: --gpus all --rm -v /dev/shm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
|
||||
timeout-minutes: 120
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
|
@ -66,7 +66,7 @@ jobs:
|
|||
cd TensorNVMe
|
||||
apt update && apt install -y cmake
|
||||
pip install -r requirements.txt
|
||||
pip install -v .
|
||||
DISABLE_URING=1 pip install -v .
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ssh-key: ${{ secrets.SSH_KEY_FOR_CI }}
|
||||
|
@ -83,13 +83,12 @@ jobs:
|
|||
fi
|
||||
- name: Install Colossal-AI
|
||||
run: |
|
||||
CUDA_EXT=1 pip install -v .
|
||||
BUILD_EXT=1 pip install -v .
|
||||
pip install -r requirements/requirements-test.txt
|
||||
- name: Unit Testing
|
||||
run: |
|
||||
PYTHONPATH=$PWD pytest tests
|
||||
PYTHONPATH=$PWD pytest --durations=0 tests
|
||||
env:
|
||||
DATA: /data/scratch/cifar-10
|
||||
NCCL_SHM_DISABLE: 1
|
||||
LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
|
||||
LLAMA_PATH: /data/scratch/llama-tiny
|
||||
|
|
|
@ -41,7 +41,7 @@ jobs:
|
|||
matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}
|
||||
container:
|
||||
image: ${{ matrix.container }}
|
||||
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
|
||||
options: --gpus all --rm -v /dev/shm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
|
||||
timeout-minutes: 120
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-run-test-${{ matrix.container }}
|
||||
|
@ -60,7 +60,7 @@ jobs:
|
|||
cd TensorNVMe
|
||||
apt update && apt install -y cmake
|
||||
pip install -r requirements.txt
|
||||
pip install -v .
|
||||
DISABLE_URING=1 pip install -v .
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ssh-key: ${{ secrets.SSH_KEY_FOR_CI }}
|
||||
|
@ -78,13 +78,12 @@ jobs:
|
|||
|
||||
- name: Install Colossal-AI
|
||||
run: |
|
||||
CUDA_EXT=1 pip install -v .
|
||||
BUILD_EXT=1 pip install -v .
|
||||
pip install -r requirements/requirements-test.txt
|
||||
- name: Unit Testing
|
||||
run: |
|
||||
PYTHONPATH=$PWD pytest tests
|
||||
PYTHONPATH=$PWD pytest --durations=0 tests
|
||||
env:
|
||||
DATA: /data/scratch/cifar-10
|
||||
NCCL_SHM_DISABLE: 1
|
||||
LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
|
||||
LLAMA_PATH: /data/scratch/llama-tiny
|
||||
|
|
|
@ -38,7 +38,7 @@ jobs:
|
|||
matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}
|
||||
container:
|
||||
image: ${{ matrix.container }}
|
||||
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
|
||||
options: --gpus all --rm -v /dev/shm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
|
||||
timeout-minutes: 120
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
|
@ -56,7 +56,7 @@ jobs:
|
|||
cd TensorNVMe
|
||||
apt update && apt install -y cmake
|
||||
pip install -r requirements.txt
|
||||
pip install -v .
|
||||
DISABLE_URING=1 pip install -v .
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ssh-key: ${{ secrets.SSH_KEY_FOR_CI }}
|
||||
|
@ -75,15 +75,14 @@ jobs:
|
|||
|
||||
- name: Install Colossal-AI
|
||||
run: |
|
||||
CUDA_EXT=1 pip install -v .
|
||||
BUILD_EXT=1 pip install -v .
|
||||
pip install -r requirements/requirements-test.txt
|
||||
|
||||
- name: Unit Testing
|
||||
run: |
|
||||
PYTHONPATH=$PWD pytest tests
|
||||
PYTHONPATH=$PWD pytest --durations=0 tests
|
||||
env:
|
||||
DATA: /data/scratch/cifar-10
|
||||
NCCL_SHM_DISABLE: 1
|
||||
LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
|
||||
LLAMA_PATH: /data/scratch/llama-tiny
|
||||
|
||||
|
|
|
@ -51,4 +51,4 @@ jobs:
|
|||
|
||||
- name: Build
|
||||
run: |
|
||||
CUDA_EXT=1 pip install -v .
|
||||
BUILD_EXT=1 pip install -v .
|
||||
|
|
|
@ -89,7 +89,7 @@ jobs:
|
|||
- name: Install ColossalAI
|
||||
run: |
|
||||
source activate pytorch
|
||||
CUDA_EXT=1 pip install -v .
|
||||
BUILD_EXT=1 pip install -v .
|
||||
|
||||
- name: Test the Doc
|
||||
run: |
|
||||
|
|
|
@ -32,7 +32,7 @@ jobs:
|
|||
|
||||
- name: Install ColossalAI
|
||||
run: |
|
||||
CUDA_EXT=1 pip install -v .
|
||||
BUILD_EXT=1 pip install -v .
|
||||
|
||||
- name: Install Doc Test Requirements
|
||||
run: |
|
||||
|
|
|
@ -46,19 +46,17 @@ jobs:
|
|||
matrix: ${{fromJson(needs.manual_check_matrix_preparation.outputs.matrix)}}
|
||||
container:
|
||||
image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
|
||||
options: --gpus all --rm -v /data/scratch/examples-data:/data/
|
||||
options: --gpus all --rm -v /data/scratch/examples-data:/data/ -v /dev/shm
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: 📚 Checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Install Colossal-AI
|
||||
run: |
|
||||
CUDA_EXT=1 pip install -v .
|
||||
BUILD_EXT=1 pip install -v .
|
||||
- name: Test the example
|
||||
run: |
|
||||
dir=${{ matrix.directory }}
|
||||
echo "Testing ${dir} now"
|
||||
cd "${PWD}/examples/${dir}"
|
||||
bash test_ci.sh
|
||||
env:
|
||||
NCCL_SHM_DISABLE: 1
|
||||
|
|
|
@ -78,7 +78,7 @@ jobs:
|
|||
matrix: ${{fromJson(needs.detect-changed-example.outputs.matrix)}}
|
||||
container:
|
||||
image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
|
||||
options: --gpus all --rm -v /data/scratch/examples-data:/data/
|
||||
options: --gpus all --rm -v /data/scratch/examples-data:/data/ -v /dev/shm
|
||||
timeout-minutes: 20
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-run-example-${{ matrix.directory }}
|
||||
|
@ -88,12 +88,10 @@ jobs:
|
|||
|
||||
- name: Install Colossal-AI
|
||||
run: |
|
||||
CUDA_EXT=1 pip install -v .
|
||||
BUILD_EXT=1 pip install -v .
|
||||
|
||||
- name: Test the example
|
||||
run: |
|
||||
example_dir=${{ matrix.directory }}
|
||||
cd "${PWD}/examples/${example_dir}"
|
||||
bash test_ci.sh
|
||||
env:
|
||||
NCCL_SHM_DISABLE: 1
|
||||
|
|
|
@ -35,6 +35,7 @@ jobs:
|
|||
matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}
|
||||
container:
|
||||
image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
|
||||
options: --gpus all --rm -v /data/scratch/examples-data:/data/ -v /dev/shm
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: 📚 Checkout
|
||||
|
@ -42,7 +43,7 @@ jobs:
|
|||
|
||||
- name: Install Colossal-AI
|
||||
run: |
|
||||
CUDA_EXT=1 pip install -v .
|
||||
BUILD_EXT=1 pip install -v .
|
||||
|
||||
- name: Traverse all files
|
||||
run: |
|
||||
|
@ -50,8 +51,6 @@ jobs:
|
|||
echo "Testing ${example_dir} now"
|
||||
cd "${PWD}/examples/${example_dir}"
|
||||
bash test_ci.sh
|
||||
env:
|
||||
NCCL_SHM_DISABLE: 1
|
||||
|
||||
- name: Notify Lark
|
||||
id: message-preparation
|
||||
|
|
|
@ -1,97 +0,0 @@
|
|||
name: post-commit
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- closed
|
||||
|
||||
jobs:
|
||||
# this job will run after a PR is merged to run pre-commit on any changed file
|
||||
# so that the user does not need to learn pre-commit and pre-commit can still
|
||||
# be auto-executed by the workflow
|
||||
pre-commit:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.pull_request.merged == true && github.repository == 'hpcaitech/ColossalAI'
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
# the PR branch and the hpcaitech/colossal-ai main branch
|
||||
# must share a common commit, we need to locate that commit,
|
||||
# which is the commit checked-out or forked when the PR branch is created
|
||||
# such that we can look for files changed since that commit
|
||||
- name: Locate base commit
|
||||
id: locate-base-sha
|
||||
run: |
|
||||
curBranch=$(git rev-parse --abbrev-ref HEAD)
|
||||
commonCommit=$(git merge-base origin/main $curBranch)
|
||||
echo $commonCommit
|
||||
echo "baseSHA=$commonCommit" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Find the changed files
|
||||
id: find-changed-files
|
||||
uses: tj-actions/changed-files@v35
|
||||
with:
|
||||
base_sha: ${{ steps.locate-base-sha.outputs.baseSHA }}
|
||||
|
||||
- name: List all changed files
|
||||
run: |
|
||||
for file in ${{ steps.find-changed-files.outputs.all_changed_files }}; do
|
||||
echo "$file was changed"
|
||||
done
|
||||
|
||||
# check out the main branch
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: 'main'
|
||||
|
||||
- uses: actions/setup-python@v3
|
||||
|
||||
- name: Cache pre-commit hooks
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.cache/pre-commit
|
||||
key: ${{ runner.os }}-pre-commit-hooks
|
||||
|
||||
- name: Set up pre-commit
|
||||
run: |
|
||||
pip install pre-commit
|
||||
pre-commit install
|
||||
|
||||
# run pre-commit on changed files
|
||||
- name: Run Pre-commit
|
||||
run: |
|
||||
for file in ${{ steps.find-changed-files.outputs.all_changed_files }}; do
|
||||
pre-commit run --files $file || true
|
||||
done
|
||||
|
||||
# create commit for pre-commit
|
||||
# when all files are well formatted, there is no need to create a commit
|
||||
# therefore, this step will produce an error, which should be allowed
|
||||
- name: Create commits
|
||||
id: commit
|
||||
continue-on-error: true
|
||||
run: |
|
||||
git config --global user.name 'github-actions'
|
||||
git config --global user.email 'github-actions@github.com'
|
||||
git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}
|
||||
git add -A
|
||||
git commit -am "[format] applied code formatting on changed files in pull request ${{ github.event.pull_request.number }}"
|
||||
|
||||
# create pull request
|
||||
- name: Create Pull Request
|
||||
if: steps.commit.outcome == 'success'
|
||||
id: cpr
|
||||
uses: peter-evans/create-pull-request@v4
|
||||
with:
|
||||
branch: pre-commit-${{ github.event.pull_request.number }}
|
||||
title: "[format] applied code formatting on changed files in PR ${{ github.event.pull_request.number }}"
|
||||
|
||||
- name: Enable Auto-merge for the New PR
|
||||
if: steps.commit.outcome == 'success'
|
||||
uses: peter-evans/enable-pull-request-automerge@v2
|
||||
with:
|
||||
pull-request-number: ${{ steps.cpr.outputs.pull-request-number }}
|
||||
merge-method: squash
|
|
@ -19,8 +19,8 @@ jobs:
|
|||
runs-on: [self-hosted, gpu]
|
||||
container:
|
||||
image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
|
||||
options: --gpus all --rm -v /data/scratch/github_actions/chat:/data/scratch/github_actions/chat --shm-size=10.24gb
|
||||
timeout-minutes: 30
|
||||
options: --gpus all --rm -v /data/scratch/examples-data:/data/scratch/examples-data --shm-size=10.24gb
|
||||
timeout-minutes: 60
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
@ -28,26 +28,35 @@ jobs:
|
|||
- name: Checkout ColossalAI
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Install Colossal-AI
|
||||
run: |
|
||||
BUILD_EXT=1 pip install -v -e .
|
||||
|
||||
- name: Install ChatGPT
|
||||
run: |
|
||||
cd applications/Chat
|
||||
cd applications/ColossalChat
|
||||
pip install -v .
|
||||
export BUILD_EXT=1
|
||||
pip install -r examples/requirements.txt
|
||||
|
||||
- name: Install Transformers
|
||||
run: |
|
||||
pip install transformers==4.30.2
|
||||
pip install transformers==4.34.1
|
||||
|
||||
- name: Execute Examples
|
||||
run: |
|
||||
cd applications/Chat
|
||||
cd applications/ColossalChat
|
||||
rm -rf ~/.cache/colossalai
|
||||
./tests/test_inference.sh
|
||||
./tests/test_benchmarks.sh
|
||||
mkdir models
|
||||
mkdir sft_data
|
||||
mkdir prompt_data
|
||||
mkdir preference_data
|
||||
./tests/test_data_preparation.sh
|
||||
./tests/test_train.sh
|
||||
env:
|
||||
NCCL_SHM_DISABLE: 1
|
||||
MAX_JOBS: 8
|
||||
SFT_DATASET: /data/scratch/github_actions/chat/data.json
|
||||
PROMPT_DATASET: /data/scratch/github_actions/chat/prompts_en.jsonl
|
||||
PRETRAIN_DATASET: /data/scratch/github_actions/chat/alpaca_data.json
|
||||
PRETRAINED_MODEL_PATH: ./models
|
||||
SFT_DATASET: ./sft_data
|
||||
PROMPT_DATASET: ./prompt_data
|
||||
PREFERENCE_DATASET: ./preference_data
|
||||
|
|
|
@ -21,7 +21,7 @@ jobs:
|
|||
runs-on: [self-hosted, gpu]
|
||||
container:
|
||||
image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
|
||||
options: --gpus all --rm -v /data/scratch/chatgpt:/data/scratch/chatgpt
|
||||
options: --gpus all --rm -v /data/scratch/examples-data:/data/scratch/examples-data
|
||||
timeout-minutes: 30
|
||||
defaults:
|
||||
run:
|
||||
|
@ -32,15 +32,17 @@ jobs:
|
|||
|
||||
- name: Install ChatGPT
|
||||
run: |
|
||||
cd applications/Chat
|
||||
cd applications/ColossalChat
|
||||
pip install -v .
|
||||
pip install -r requirements-test.txt
|
||||
pip install pytest
|
||||
|
||||
- name: Execute Unit Testing
|
||||
run: |
|
||||
cd applications/Chat
|
||||
cd applications/ColossalChat
|
||||
rm -rf ~/.cache/colossalai
|
||||
pytest tests/
|
||||
cd ./tests
|
||||
./test_templating.sh
|
||||
env:
|
||||
NCCL_SHM_DISABLE: 1
|
||||
MAX_JOBS: 8
|
||||
|
|
|
@ -159,3 +159,7 @@ coverage.xml
|
|||
# ignore testmon and coverage files
|
||||
.coverage
|
||||
.testmondata*
|
||||
|
||||
# log, test files - ColossalChat
|
||||
applications/ColossalChat/logs
|
||||
applications/ColossalChat/tests/logs
|
||||
|
|
2
LICENSE
2
LICENSE
|
@ -551,4 +551,4 @@ Copyright 2021- HPC-AI Technology Inc. All rights reserved.
|
|||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
THE SOFTWARE.
|
||||
|
|
36
README.md
36
README.md
|
@ -25,16 +25,15 @@
|
|||
</div>
|
||||
|
||||
## Latest News
|
||||
* [2024/03] [314 Billion Parameter Grok-1 Inference Accelerated by 3.8x, Efficient and Easy-to-Use PyTorch+HuggingFace version is Here](https://hpc-ai.com/blog/314-billion-parameter-grok-1-inference-accelerated-by-3.8x-efficient-and-easy-to-use-pytorchhuggingface-version-is-here)
|
||||
* [2024/03] [Open-Sora: Revealing Complete Model Parameters, Training Details, and Everything for Sora-like Video Generation Models](https://hpc-ai.com/blog/open-sora-v1.0)
|
||||
* [2024/03] [Open-Sora:Sora Replication Solution with 46% Cost Reduction, Sequence Expansion to Nearly a Million](https://hpc-ai.com/blog/open-sora)
|
||||
* [2024/01] [Inference Performance Improved by 46%, Open Source Solution Breaks the Length Limit of LLM for Multi-Round Conversations](https://hpc-ai.com/blog/Colossal-AI-SwiftInfer)
|
||||
* [2024/01] [Construct Refined 13B Private Model With Just $5000 USD, Upgraded Colossal-AI Llama-2 Open Source](https://hpc-ai.com/blog/colossal-llama-2-13b)
|
||||
* [2023/11] [Enhanced MoE Parallelism, Open-source MoE Model Training Can Be 9 Times More Efficient](https://www.hpc-ai.tech/blog/enhanced-moe-parallelism-open-source-moe-model-training-can-be-9-times-more-efficient)
|
||||
* [2023/09] [One Half-Day of Training Using a Few Hundred Dollars Yields Similar Results to Mainstream Large Models, Open-Source and Commercial-Free Domain-Specific LLM Solution](https://www.hpc-ai.tech/blog/one-half-day-of-training-using-a-few-hundred-dollars-yields-similar-results-to-mainstream-large-models-open-source-and-commercial-free-domain-specific-llm-solution)
|
||||
* [2023/09] [70 Billion Parameter LLaMA2 Model Training Accelerated by 195%](https://www.hpc-ai.tech/blog/70b-llama2-training)
|
||||
* [2023/07] [HPC-AI Tech Raises 22 Million USD in Series A Funding](https://www.hpc-ai.tech/blog/hpc-ai-tech-raises-22-million-usd-in-series-a-funding-to-fuel-team-expansion-and-business-growth)
|
||||
* [2023/07] [65B Model Pretraining Accelerated by 38%, Best Practices for Building LLaMA-Like Base Models Open-Source](https://www.hpc-ai.tech/blog/large-model-pretraining)
|
||||
* [2023/03] [ColossalChat: An Open-Source Solution for Cloning ChatGPT With a Complete RLHF Pipeline](https://medium.com/@yangyou_berkeley/colossalchat-an-open-source-solution-for-cloning-chatgpt-with-a-complete-rlhf-pipeline-5edf08fb538b)
|
||||
* [2023/03] [Intel and Colossal-AI Partner to Deliver Cost-Efficient Open-Source Solution for Protein Folding Structure Prediction](https://www.hpc-ai.tech/blog/intel-habana)
|
||||
* [2023/03] [AWS and Google Fund Colossal-AI with Startup Cloud Programs](https://www.hpc-ai.tech/blog/aws-and-google-fund-colossal-ai-with-startup-cloud-programs)
|
||||
|
||||
## Table of Contents
|
||||
<ul>
|
||||
|
@ -43,6 +42,7 @@
|
|||
<li>
|
||||
<a href="#Colossal-AI-in-the-Real-World">Colossal-AI for Real World Applications</a>
|
||||
<ul>
|
||||
<li><a href="#Open-Sora">Open-Sora: Revealing Complete Model Parameters, Training Details, and Everything for Sora-like Video Generation Models</a></li>
|
||||
<li><a href="#Colossal-LLaMA-2">Colossal-LLaMA-2: One Half-Day of Training Using a Few Hundred Dollars Yields Similar Results to Mainstream Large Models, Open-Source and Commercial-Free Domain-Specific Llm Solution</a></li>
|
||||
<li><a href="#ColossalChat">ColossalChat: An Open-Source Solution for Cloning ChatGPT With a Complete RLHF Pipeline</a></li>
|
||||
<li><a href="#AIGC">AIGC: Acceleration of Stable Diffusion</a></li>
|
||||
|
@ -73,6 +73,7 @@
|
|||
<li>
|
||||
<a href="#Inference">Inference</a>
|
||||
<ul>
|
||||
<li><a href="#Grok-1">Grok-1: 314B model of PyTorch + HuggingFace Inference</a></li>
|
||||
<li><a href="#SwiftInfer">SwiftInfer:Breaks the Length Limit of LLM for Multi-Round Conversations with 46% Acceleration</a></li>
|
||||
<li><a href="#GPT-3-Inference">GPT-3</a></li>
|
||||
<li><a href="#OPT-Serving">OPT-175B Online Serving for Text Generation</a></li>
|
||||
|
@ -126,6 +127,21 @@ distributed training and inference in a few lines.
|
|||
<p align="right">(<a href="#top">back to top</a>)</p>
|
||||
|
||||
## Colossal-AI in the Real World
|
||||
### Open-Sora
|
||||
|
||||
[Open-Sora](https://github.com/hpcaitech/Open-Sora):Revealing Complete Model Parameters, Training Details, and Everything for Sora-like Video Generation Models
|
||||
[[code]](https://github.com/hpcaitech/Open-Sora)
|
||||
[[blog]](https://hpc-ai.com/blog/open-sora-v1.0)
|
||||
[[HuggingFace model weights]](https://huggingface.co/hpcai-tech/Open-Sora)
|
||||
[[Demo]](https://github.com/hpcaitech/Open-Sora?tab=readme-ov-file#-latest-demo)
|
||||
|
||||
<div align="center">
|
||||
<a href="https://www.youtube.com/watch?v=iDTxepqixuc">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/applications/sora/sora-demo.png" width="700" />
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<p align="right">(<a href="#top">back to top</a>)</p>
|
||||
|
||||
### Colossal-LLaMA-2
|
||||
|
||||
|
@ -351,6 +367,18 @@ Please visit our [documentation](https://www.colossalai.org/) and [examples](htt
|
|||
|
||||
|
||||
## Inference
|
||||
### Grok-1
|
||||
<p id="Grok-1" align="center">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/examples/images/grok-1-inference.jpg" width=600/>
|
||||
</p>
|
||||
|
||||
- 314 Billion Parameter Grok-1 Inference Accelerated by 3.8x, an easy-to-use Python + PyTorch + HuggingFace version for Inference.
|
||||
|
||||
[[code]](https://github.com/hpcaitech/ColossalAI/tree/main/examples/language/grok-1)
|
||||
[[blog]](https://hpc-ai.com/blog/314-billion-parameter-grok-1-inference-accelerated-by-3.8x-efficient-and-easy-to-use-pytorchhuggingface-version-is-here)
|
||||
[[HuggingFace Grok-1 PyTorch model weights]](https://huggingface.co/hpcai-tech/grok-1)
|
||||
[[ModelScope Grok-1 PyTorch model weights]](https://www.modelscope.cn/models/colossalai/grok-1-pytorch/summary)
|
||||
|
||||
<p id="SwiftInfer" align="center">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/SwiftInfer.jpg" width=800/>
|
||||
</p>
|
||||
|
|
|
@ -1,38 +0,0 @@
|
|||
# Benchmarks
|
||||
|
||||
## Benchmark OPT with LoRA on dummy prompt data
|
||||
|
||||
We provide various OPT models (string in parentheses is the corresponding model name used in this script):
|
||||
|
||||
- OPT-125M (125m)
|
||||
- OPT-350M (350m)
|
||||
- OPT-700M (700m)
|
||||
- OPT-1.3B (1.3b)
|
||||
- OPT-2.7B (2.7b)
|
||||
- OPT-3.5B (3.5b)
|
||||
- OPT-5.5B (5.5b)
|
||||
- OPT-6.7B (6.7b)
|
||||
- OPT-10B (10b)
|
||||
- OPT-13B (13b)
|
||||
|
||||
We also provide various training strategies:
|
||||
|
||||
- ddp: torch DDP
|
||||
- colossalai_gemini: ColossalAI GeminiDDP with `placement_policy="cuda"`, like zero3
|
||||
- colossalai_gemini_cpu: ColossalAI GeminiDDP with `placement_policy="cpu"`, like zero3-offload
|
||||
- colossalai_zero2: ColossalAI zero2
|
||||
- colossalai_zero2_cpu: ColossalAI zero2-offload
|
||||
- colossalai_zero1: ColossalAI zero1
|
||||
- colossalai_zero1_cpu: ColossalAI zero1-offload
|
||||
|
||||
We only support `torchrun` to launch now. E.g.
|
||||
|
||||
```bash
|
||||
# run OPT-125M with no lora (lora_rank=0) on single-node single-GPU with min batch size
|
||||
torchrun --standalone --nproc_per_node 1 benchmark_opt_lora_dummy.py \
|
||||
--model 125m --critic_model 125m --strategy ddp \
|
||||
--experience_batch_size 1 --train_batch_size 1 --lora_rank 0
|
||||
# run Actor (OPT-1.3B) and Critic (OPT-350M) with lora_rank=4 on single-node 4-GPU
|
||||
torchrun --standalone --nproc_per_node 4 benchmark_opt_lora_dummy.py \
|
||||
--model 1.3b --critic_model 350m --strategy colossalai_zero2 --lora_rank 4
|
||||
```
|
|
@ -1,208 +0,0 @@
|
|||
import argparse
|
||||
from copy import deepcopy
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import torch.nn as nn
|
||||
from coati.models.base import RewardModel
|
||||
from coati.models.opt import OPTActor, OPTCritic
|
||||
from coati.trainer import PPOTrainer
|
||||
from coati.trainer.callbacks import PerformanceEvaluator
|
||||
from coati.trainer.strategies import DDPStrategy, GeminiStrategy, LowLevelZeroStrategy, Strategy
|
||||
from torch.optim import Adam
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoTokenizer
|
||||
from transformers.models.opt.configuration_opt import OPTConfig
|
||||
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
|
||||
|
||||
def get_model_numel(model: nn.Module, strategy: Strategy) -> int:
|
||||
numel = sum(p.numel() for p in model.parameters())
|
||||
if isinstance(strategy, GeminiStrategy) and strategy.shard_init:
|
||||
numel *= dist.get_world_size()
|
||||
return numel
|
||||
|
||||
|
||||
def preprocess_batch(samples) -> dict:
|
||||
input_ids = torch.stack(samples)
|
||||
attention_mask = torch.ones_like(input_ids, dtype=torch.long)
|
||||
return {"input_ids": input_ids, "attention_mask": attention_mask}
|
||||
|
||||
|
||||
def print_rank_0(*args, **kwargs) -> None:
|
||||
if dist.get_rank() == 0:
|
||||
print(*args, **kwargs)
|
||||
|
||||
|
||||
def print_model_numel(model_dict: dict) -> None:
|
||||
B = 1024**3
|
||||
M = 1024**2
|
||||
K = 1024
|
||||
outputs = ""
|
||||
for name, numel in model_dict.items():
|
||||
outputs += f"{name}: "
|
||||
if numel >= B:
|
||||
outputs += f"{numel / B:.2f} B\n"
|
||||
elif numel >= M:
|
||||
outputs += f"{numel / M:.2f} M\n"
|
||||
elif numel >= K:
|
||||
outputs += f"{numel / K:.2f} K\n"
|
||||
else:
|
||||
outputs += f"{numel}\n"
|
||||
print_rank_0(outputs)
|
||||
|
||||
|
||||
def get_gpt_config(model_name: str) -> OPTConfig:
|
||||
model_map = {
|
||||
"125m": OPTConfig.from_pretrained("facebook/opt-125m"),
|
||||
"350m": OPTConfig(hidden_size=1024, ffn_dim=4096, num_hidden_layers=24, num_attention_heads=16),
|
||||
"700m": OPTConfig(hidden_size=1280, ffn_dim=5120, num_hidden_layers=36, num_attention_heads=20),
|
||||
"1.3b": OPTConfig.from_pretrained("facebook/opt-1.3b"),
|
||||
"2.7b": OPTConfig.from_pretrained("facebook/opt-2.7b"),
|
||||
"3.5b": OPTConfig(hidden_size=3072, ffn_dim=12288, num_hidden_layers=32, num_attention_heads=32),
|
||||
"5.5b": OPTConfig(hidden_size=3840, ffn_dim=15360, num_hidden_layers=32, num_attention_heads=32),
|
||||
"6.7b": OPTConfig.from_pretrained("facebook/opt-6.7b"),
|
||||
"10b": OPTConfig(hidden_size=5120, ffn_dim=20480, num_hidden_layers=32, num_attention_heads=32),
|
||||
"13b": OPTConfig.from_pretrained("facebook/opt-13b"),
|
||||
}
|
||||
try:
|
||||
return model_map[model_name]
|
||||
except KeyError:
|
||||
raise ValueError(f'Unknown model "{model_name}"')
|
||||
|
||||
|
||||
def main(args):
|
||||
if args.strategy == "ddp":
|
||||
strategy = DDPStrategy()
|
||||
elif args.strategy == "colossalai_gemini":
|
||||
strategy = GeminiStrategy(placement_policy="static",initial_scale=2**5)
|
||||
elif args.strategy == "colossalai_gemini_cpu":
|
||||
strategy = GeminiStrategy(placement_policy="static", offload_optim_frac=1.0, offload_param_frac=1.0, initial_scale=2**5)
|
||||
elif args.strategy == "colossalai_zero2":
|
||||
strategy = LowLevelZeroStrategy(stage=2, placement_policy="cuda")
|
||||
elif args.strategy == "colossalai_zero2_cpu":
|
||||
strategy = LowLevelZeroStrategy(stage=2, placement_policy="cpu")
|
||||
elif args.strategy == "colossalai_zero1":
|
||||
strategy = LowLevelZeroStrategy(stage=1, placement_policy="cuda")
|
||||
elif args.strategy == "colossalai_zero1_cpu":
|
||||
strategy = LowLevelZeroStrategy(stage=1, placement_policy="cpu")
|
||||
else:
|
||||
raise ValueError(f'Unsupported strategy "{args.strategy}"')
|
||||
|
||||
torch.cuda.set_per_process_memory_fraction(args.cuda_mem_frac)
|
||||
|
||||
model_config = get_gpt_config(args.model)
|
||||
critic_config = get_gpt_config(args.critic_model)
|
||||
with strategy.model_init_context():
|
||||
actor = OPTActor(config=model_config, lora_rank=args.lora_rank).cuda()
|
||||
critic = OPTCritic(config=critic_config, lora_rank=args.lora_rank).cuda()
|
||||
|
||||
initial_model = deepcopy(actor).cuda().half()
|
||||
reward_model = RewardModel(deepcopy(critic.model), deepcopy(critic.value_head)).cuda().half()
|
||||
|
||||
if args.use_kernels:
|
||||
from coati.kernels import convert_to_xformer_model
|
||||
|
||||
actor, critic, initial_model, reward_model = map(
|
||||
convert_to_xformer_model, (actor, critic, initial_model, reward_model)
|
||||
)
|
||||
|
||||
actor_numel = get_model_numel(actor, strategy)
|
||||
critic_numel = get_model_numel(critic, strategy)
|
||||
initial_model_numel = get_model_numel(initial_model, strategy)
|
||||
reward_model_numel = get_model_numel(reward_model, strategy)
|
||||
print_model_numel(
|
||||
{
|
||||
"Actor": actor_numel,
|
||||
"Critic": critic_numel,
|
||||
"Initial model": initial_model_numel,
|
||||
"Reward model": reward_model_numel,
|
||||
}
|
||||
)
|
||||
performance_evaluator = PerformanceEvaluator(
|
||||
actor_numel,
|
||||
critic_numel,
|
||||
initial_model_numel,
|
||||
reward_model_numel,
|
||||
enable_grad_checkpoint=False,
|
||||
ignore_episodes=1,
|
||||
)
|
||||
|
||||
if args.strategy.startswith("colossalai"):
|
||||
actor_optim = HybridAdam(actor.parameters(), lr=5e-6)
|
||||
critic_optim = HybridAdam(critic.parameters(), lr=5e-6)
|
||||
else:
|
||||
actor_optim = Adam(actor.parameters(), lr=5e-6)
|
||||
critic_optim = Adam(critic.parameters(), lr=5e-6)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
tokenizer.padding_side = "left"
|
||||
|
||||
(actor, actor_optim), (critic, critic_optim) = strategy.prepare((actor, actor_optim), (critic, critic_optim))
|
||||
|
||||
random_prompts = torch.randint(tokenizer.vocab_size, (1000, 256), device=torch.cuda.current_device())
|
||||
dataloader = DataLoader(
|
||||
random_prompts, batch_size=args.experience_batch_size, shuffle=True, collate_fn=preprocess_batch
|
||||
)
|
||||
|
||||
trainer = PPOTrainer(
|
||||
strategy,
|
||||
actor,
|
||||
critic,
|
||||
reward_model,
|
||||
initial_model,
|
||||
actor_optim,
|
||||
critic_optim,
|
||||
tokenizer=tokenizer,
|
||||
ptx_coef=0,
|
||||
train_batch_size=args.train_batch_size,
|
||||
offload_inference_models=args.offload_inference_models,
|
||||
max_length=512,
|
||||
do_sample=True,
|
||||
temperature=1.0,
|
||||
top_k=50,
|
||||
use_cache=True,
|
||||
callbacks=[performance_evaluator],
|
||||
)
|
||||
|
||||
trainer.fit(
|
||||
prompt_dataloader=dataloader,
|
||||
pretrain_dataloader=None,
|
||||
num_episodes=args.num_episodes,
|
||||
num_update_steps=args.num_update_steps,
|
||||
num_collect_steps=args.num_collect_steps,
|
||||
)
|
||||
|
||||
print_rank_0(f"Peak CUDA mem: {torch.cuda.max_memory_allocated()/1024**3:.2f} GB")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--model", default="125m")
|
||||
parser.add_argument("--critic_model", default="125m")
|
||||
parser.add_argument(
|
||||
"--strategy",
|
||||
choices=[
|
||||
"ddp",
|
||||
"colossalai_gemini",
|
||||
"colossalai_gemini_cpu",
|
||||
"colossalai_zero2",
|
||||
"colossalai_zero2_cpu",
|
||||
"colossalai_zero1",
|
||||
"colossalai_zero1_cpu",
|
||||
],
|
||||
default="ddp",
|
||||
)
|
||||
parser.add_argument("--num_episodes", type=int, default=3)
|
||||
parser.add_argument("--num_collect_steps", type=int, default=8)
|
||||
parser.add_argument("--num_update_steps", type=int, default=1)
|
||||
parser.add_argument("--train_batch_size", type=int, default=8)
|
||||
parser.add_argument("--experience_batch_size", type=int, default=8)
|
||||
parser.add_argument("--lora_rank", type=int, default=0)
|
||||
parser.add_argument("--cuda_mem_frac", type=float, default=1.0)
|
||||
parser.add_argument("--offload_inference_models", action="store_true", default=False)
|
||||
parser.add_argument("--use_kernels", action="store_true", default=False)
|
||||
args = parser.parse_args()
|
||||
main(args)
|
|
@ -1,13 +0,0 @@
|
|||
from .prompt_dataset import PromptDataset
|
||||
from .reward_dataset import HhRlhfDataset, RmStaticDataset
|
||||
from .sft_dataset import SFTDataset, SupervisedDataset
|
||||
from .utils import is_rank_0
|
||||
|
||||
__all__ = [
|
||||
"RmStaticDataset",
|
||||
"HhRlhfDataset",
|
||||
"SFTDataset",
|
||||
"SupervisedDataset",
|
||||
"PromptDataset",
|
||||
"is_rank_0",
|
||||
]
|
|
@ -1,89 +0,0 @@
|
|||
# Copyright 2023 lm-sys@FastChat
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import dataclasses
|
||||
from enum import Enum, auto
|
||||
from typing import List
|
||||
|
||||
|
||||
class SeparatorStyle(Enum):
|
||||
ADD_EOS_TOKEN = auto()
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class Conversation:
|
||||
system: str
|
||||
roles: List[str]
|
||||
messages: List[List[str]]
|
||||
offset: int
|
||||
sep_style: SeparatorStyle = SeparatorStyle.ADD_EOS_TOKEN
|
||||
sep: str = "</s>"
|
||||
|
||||
skip_next: bool = False
|
||||
|
||||
def get_prompt(self):
|
||||
if self.sep_style == SeparatorStyle.ADD_EOS_TOKEN:
|
||||
ret = self.system
|
||||
for role, message in self.messages:
|
||||
if message:
|
||||
ret += role + ": " + message + self.sep
|
||||
else:
|
||||
ret += role + ": "
|
||||
return ret
|
||||
else:
|
||||
raise ValueError(f"Invalid style: {self.sep_style}")
|
||||
|
||||
def append_message(self, role, message):
|
||||
self.messages.append([role, message])
|
||||
|
||||
def to_gradio_chatbot(self):
|
||||
ret = []
|
||||
for i, (role, msg) in enumerate(self.messages[self.offset :]):
|
||||
if i % 2 == 0:
|
||||
ret.append([msg, None])
|
||||
else:
|
||||
ret[-1][-1] = msg
|
||||
return ret
|
||||
|
||||
def copy(self):
|
||||
return Conversation(
|
||||
system=self.system,
|
||||
roles=self.roles,
|
||||
messages=[[x, y] for x, y in self.messages],
|
||||
offset=self.offset,
|
||||
sep_style=self.sep_style,
|
||||
sep=self.sep,
|
||||
)
|
||||
|
||||
def dict(self):
|
||||
return {
|
||||
"system": self.system,
|
||||
"roles": self.roles,
|
||||
"messages": self.messages,
|
||||
"offset": self.offset,
|
||||
"sep": self.sep,
|
||||
}
|
||||
|
||||
|
||||
conv = Conversation(
|
||||
system="A chat between a curious human and an artificial intelligence assistant. "
|
||||
"The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
|
||||
roles=("Human", "Assistant"),
|
||||
messages=(),
|
||||
offset=0,
|
||||
sep_style=SeparatorStyle.ADD_EOS_TOKEN,
|
||||
sep="</s>",
|
||||
)
|
||||
|
||||
default_conversation = conv
|
|
@ -1,45 +0,0 @@
|
|||
from collections import defaultdict
|
||||
from typing import Dict
|
||||
|
||||
import torch
|
||||
import transformers
|
||||
from torch.utils.data import Dataset
|
||||
|
||||
from colossalai.logging import get_dist_logger
|
||||
|
||||
from .utils import jload
|
||||
|
||||
|
||||
class PromptDataset(Dataset):
|
||||
"""Dataset for supervised fine-tuning."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
data_path: str,
|
||||
tokenizer: transformers.PreTrainedTokenizer,
|
||||
max_datasets_size: int = None,
|
||||
max_length: int = 96,
|
||||
):
|
||||
super(PromptDataset, self).__init__()
|
||||
self.keyed_prompt = defaultdict(list)
|
||||
self.logger = get_dist_logger()
|
||||
self.logger.info("Loading data...")
|
||||
list_data_dict = jload(data_path)
|
||||
self.logger.info(f"Loaded {len(list_data_dict)} examples.")
|
||||
|
||||
if max_datasets_size is not None:
|
||||
self.logger.info(f"Limiting dataset to {max_datasets_size} examples.")
|
||||
list_data_dict = list_data_dict[:max_datasets_size]
|
||||
|
||||
instructions = [data_dict["instruction"] for data_dict in list_data_dict]
|
||||
tokens = tokenizer(
|
||||
instructions, return_tensors="pt", max_length=max_length, padding="max_length", truncation=True
|
||||
)
|
||||
for k, tensor in tokens.items():
|
||||
self.keyed_prompt[k] = tensor.to(torch.cuda.current_device()).unbind()
|
||||
|
||||
def __len__(self):
|
||||
return len(self.keyed_prompt["input_ids"])
|
||||
|
||||
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
|
||||
return {k: v[i] for k, v in self.keyed_prompt.items()}
|
|
@ -1,88 +0,0 @@
|
|||
from typing import Callable
|
||||
|
||||
from torch.utils.data import Dataset
|
||||
from tqdm import tqdm
|
||||
|
||||
from .utils import is_rank_0
|
||||
|
||||
|
||||
# Dahoas/rm-static
|
||||
class RmStaticDataset(Dataset):
|
||||
"""
|
||||
Dataset for reward model
|
||||
|
||||
Args:
|
||||
dataset: dataset for reward model
|
||||
tokenizer: tokenizer for reward model
|
||||
max_length: max length of input
|
||||
special_token: special token at the end of sentence
|
||||
"""
|
||||
|
||||
def __init__(self, dataset, tokenizer: Callable, max_length: int, special_token=None) -> None:
|
||||
super().__init__()
|
||||
self.end_token = tokenizer.eos_token if special_token is None else special_token
|
||||
|
||||
chosen = [data["prompt"] + data["chosen"] + self.end_token for data in tqdm(dataset, disable=not is_rank_0())]
|
||||
chosen_token = tokenizer(
|
||||
chosen, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
|
||||
)
|
||||
self.chosen = {"input_ids": chosen_token["input_ids"], "attention_mask": chosen_token["attention_mask"]}
|
||||
|
||||
reject = [data["prompt"] + data["rejected"] + self.end_token for data in tqdm(dataset, disable=not is_rank_0())]
|
||||
reject_token = tokenizer(
|
||||
reject, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
|
||||
)
|
||||
self.reject = {"input_ids": reject_token["input_ids"], "attention_mask": reject_token["attention_mask"]}
|
||||
|
||||
def __len__(self):
|
||||
length = self.chosen["input_ids"].shape[0]
|
||||
return length
|
||||
|
||||
def __getitem__(self, idx):
|
||||
return (
|
||||
self.chosen["input_ids"][idx],
|
||||
self.chosen["attention_mask"][idx],
|
||||
self.reject["input_ids"][idx],
|
||||
self.reject["attention_mask"][idx],
|
||||
)
|
||||
|
||||
|
||||
# Anthropic/hh-rlhf
|
||||
class HhRlhfDataset(Dataset):
|
||||
"""
|
||||
Dataset for reward model
|
||||
|
||||
Args:
|
||||
dataset: dataset for reward model
|
||||
tokenizer: tokenizer for reward model
|
||||
max_length: max length of input
|
||||
special_token: special token at the end of sentence
|
||||
"""
|
||||
|
||||
def __init__(self, dataset, tokenizer: Callable, max_length: int, special_token=None) -> None:
|
||||
super().__init__()
|
||||
self.end_token = tokenizer.eos_token if special_token is None else special_token
|
||||
|
||||
chosen = [data["chosen"] + self.end_token for data in tqdm(dataset, disable=not is_rank_0())]
|
||||
chosen_token = tokenizer(
|
||||
chosen, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
|
||||
)
|
||||
self.chosen = {"input_ids": chosen_token["input_ids"], "attention_mask": chosen_token["attention_mask"]}
|
||||
|
||||
reject = [data["rejected"] + self.end_token for data in tqdm(dataset, disable=not is_rank_0())]
|
||||
reject_token = tokenizer(
|
||||
reject, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
|
||||
)
|
||||
self.reject = {"input_ids": reject_token["input_ids"], "attention_mask": reject_token["attention_mask"]}
|
||||
|
||||
def __len__(self):
|
||||
length = self.chosen["input_ids"].shape[0]
|
||||
return length
|
||||
|
||||
def __getitem__(self, idx):
|
||||
return (
|
||||
self.chosen["input_ids"][idx],
|
||||
self.chosen["attention_mask"][idx],
|
||||
self.reject["input_ids"][idx],
|
||||
self.reject["attention_mask"][idx],
|
||||
)
|
|
@ -1,200 +0,0 @@
|
|||
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
from typing import Dict, Optional, Sequence, Tuple
|
||||
|
||||
import torch
|
||||
from coati.models.chatglm.chatglm_tokenizer import ChatGLMTokenizer
|
||||
from torch.utils.data import Dataset
|
||||
from tqdm import tqdm
|
||||
from transformers import PreTrainedTokenizer
|
||||
|
||||
from colossalai.logging import get_dist_logger
|
||||
|
||||
from .utils import is_rank_0, jload
|
||||
|
||||
logger = get_dist_logger()
|
||||
|
||||
IGNORE_INDEX = -100
|
||||
PROMPT_DICT = {
|
||||
"prompt_input": (
|
||||
"Below is an instruction that describes a task, paired with an input that provides further context. "
|
||||
"Write a response that appropriately completes the request.\n\n"
|
||||
"### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:"
|
||||
),
|
||||
"prompt_no_input": (
|
||||
"Below is an instruction that describes a task. "
|
||||
"Write a response that appropriately completes the request.\n\n"
|
||||
"### Instruction:\n{instruction}\n\n### Response:"
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
def _preprocess(
|
||||
sources: Sequence[str],
|
||||
targets: Sequence[str],
|
||||
tokenizer: PreTrainedTokenizer,
|
||||
max_length: int,
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""Preprocess the data by tokenizing."""
|
||||
sequences = [s + t + tokenizer.eos_token for s, t in zip(sources, targets)]
|
||||
sequences_token = tokenizer(
|
||||
sequences, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt", add_special_tokens=False
|
||||
)
|
||||
|
||||
sources_token = tokenizer(
|
||||
sources, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt", add_special_tokens=False
|
||||
)
|
||||
|
||||
assert sequences_token["attention_mask"].dim() == 2, "seq2seq model should be preprocessed differently"
|
||||
labels = copy.deepcopy(sequences_token["input_ids"])
|
||||
for i in range(labels.shape[0]):
|
||||
source_len = sources_token["attention_mask"][i].sum().item()
|
||||
pad_len = max_length - sequences_token["attention_mask"][i].sum().item()
|
||||
if tokenizer.padding_side == "right":
|
||||
# |prompt|completion|eos|pad|
|
||||
labels[i][:source_len] = IGNORE_INDEX
|
||||
if pad_len>0:
|
||||
labels[i][-pad_len:] = IGNORE_INDEX
|
||||
elif tokenizer.padding_side == "left":
|
||||
# |pad|prompt|completion|eos|
|
||||
labels[i][: pad_len + source_len] = IGNORE_INDEX
|
||||
else:
|
||||
raise RuntimeError()
|
||||
|
||||
return sequences_token["input_ids"], labels, sequences_token["attention_mask"]
|
||||
|
||||
|
||||
def _preprocess_chatglm(
|
||||
sources: Sequence[str],
|
||||
targets: Sequence[str],
|
||||
tokenizer: PreTrainedTokenizer,
|
||||
max_length: int,
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Preprocess the data by tokenizing.
|
||||
None for attention mask, ChatGLM will calculate attention mask according to input ids
|
||||
"""
|
||||
|
||||
labels = []
|
||||
input_ids = []
|
||||
for source, target in zip(sources, targets):
|
||||
source_id = tokenizer.encode(text=source, add_special_tokens=False)
|
||||
target_id = tokenizer.encode(text=target, add_special_tokens=False)
|
||||
input_id = tokenizer.build_inputs_with_special_tokens(source_id, target_id)
|
||||
# truncate
|
||||
sp_token_list = [tokenizer.gmask_token_id, tokenizer.bos_token_id]
|
||||
truncate_length = max(0, len(input_id) - max_length)
|
||||
input_id = input_id[truncate_length:]
|
||||
if truncate_length == len(source_id) + 1:
|
||||
input_id = sp_token_list + input_id[1:]
|
||||
elif truncate_length > len(source_id) + 1:
|
||||
input_id = sp_token_list + input_id[2:]
|
||||
|
||||
context_length = input_id.index(tokenizer.bos_token_id)
|
||||
mask_position = context_length - 1
|
||||
label = [IGNORE_INDEX] * context_length + input_id[mask_position + 1 :]
|
||||
|
||||
pad_len = max_length - len(input_id)
|
||||
input_id = input_id + [tokenizer.pad_token_id] * pad_len
|
||||
input_ids.append(input_id)
|
||||
labels.append(label + [IGNORE_INDEX] * pad_len)
|
||||
return torch.tensor(input_ids), torch.tensor(labels), None
|
||||
|
||||
|
||||
class SFTDataset(Dataset):
|
||||
"""
|
||||
Dataset for sft model
|
||||
|
||||
Args:
|
||||
dataset: dataset for supervised model
|
||||
tokenizer: tokenizer for supervised model
|
||||
max_length: max length of input
|
||||
"""
|
||||
|
||||
def __init__(self, dataset: Dict, tokenizer: PreTrainedTokenizer, max_length: int = 512) -> None:
|
||||
super().__init__()
|
||||
self.input_ids = []
|
||||
|
||||
sources = [data["prompt"] for data in dataset]
|
||||
targets = [data["completion"] + tokenizer.eos_token for data in tqdm(dataset, disable=not is_rank_0())]
|
||||
|
||||
logger.info("Tokenizing inputs... This may take some time...")
|
||||
if isinstance(tokenizer, ChatGLMTokenizer):
|
||||
self.input_ids, self.labels, self.attention_mask = _preprocess_chatglm(
|
||||
sources, targets, tokenizer, max_length
|
||||
)
|
||||
else:
|
||||
self.input_ids, self.labels, self.attention_mask = _preprocess(sources, targets, tokenizer, max_length)
|
||||
|
||||
logger.info("Loaded dataset.")
|
||||
|
||||
def __len__(self):
|
||||
length = self.input_ids.shape[0]
|
||||
return length
|
||||
|
||||
def __getitem__(self, idx):
|
||||
if self.attention_mask is not None:
|
||||
return dict(input_ids=self.input_ids[idx], labels=self.labels[idx], attention_mask=self.attention_mask[idx])
|
||||
else:
|
||||
return dict(input_ids=self.input_ids[idx], labels=self.labels[idx])
|
||||
|
||||
|
||||
class SupervisedDataset(Dataset):
|
||||
"""Dataset for supervised fine-tuning."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
data_path: str,
|
||||
tokenizer: PreTrainedTokenizer,
|
||||
max_datasets_size: Optional[int] = None,
|
||||
max_length: int = 512,
|
||||
):
|
||||
super().__init__()
|
||||
logger.info("Loading data...")
|
||||
list_data_dict = jload(data_path)
|
||||
logger.info(f"Loaded {len(list_data_dict)} examples.")
|
||||
|
||||
if max_datasets_size is not None:
|
||||
logger.info(f"Limiting dataset to {max_datasets_size} examples.")
|
||||
list_data_dict = list_data_dict[:max_datasets_size]
|
||||
|
||||
logger.info("Formatting inputs...")
|
||||
prompt_input, prompt_no_input = PROMPT_DICT["prompt_input"], PROMPT_DICT["prompt_no_input"]
|
||||
sources = [
|
||||
prompt_input.format_map(example) if "input" in example else prompt_no_input.format_map(example)
|
||||
for example in list_data_dict
|
||||
]
|
||||
targets = [example["output"] + tokenizer.eos_token for example in list_data_dict]
|
||||
|
||||
logger.info("Tokenizing inputs... This may take some time...")
|
||||
if isinstance(tokenizer, ChatGLMTokenizer):
|
||||
self.input_ids, self.labels, self.attention_mask = _preprocess_chatglm(
|
||||
sources, targets, tokenizer, max_length
|
||||
)
|
||||
else:
|
||||
self.input_ids, self.labels, self.attention_mask = _preprocess(sources, targets, tokenizer, max_length)
|
||||
|
||||
logger.info("Loaded dataset.")
|
||||
|
||||
def __len__(self):
|
||||
length = self.input_ids.shape[0]
|
||||
return length
|
||||
|
||||
def __getitem__(self, idx):
|
||||
if self.attention_mask is not None:
|
||||
return dict(input_ids=self.input_ids[idx], labels=self.labels[idx], attention_mask=self.attention_mask[idx])
|
||||
else:
|
||||
return dict(input_ids=self.input_ids[idx], labels=self.labels[idx])
|
|
@ -1,22 +0,0 @@
|
|||
import io
|
||||
import json
|
||||
|
||||
import torch.distributed as dist
|
||||
|
||||
|
||||
def is_rank_0() -> bool:
|
||||
return not dist.is_initialized() or dist.get_rank() == 0
|
||||
|
||||
|
||||
def _make_r_io_base(f, mode: str):
|
||||
if not isinstance(f, io.IOBase):
|
||||
f = open(f, mode=mode)
|
||||
return f
|
||||
|
||||
|
||||
def jload(f, mode="r"):
|
||||
"""Load a .json file into a dictionary."""
|
||||
f = _make_r_io_base(f, mode)
|
||||
jdict = json.load(f)
|
||||
f.close()
|
||||
return jdict
|
|
@ -1,71 +0,0 @@
|
|||
import torch
|
||||
import torch.nn.functional as F
|
||||
from coati.models.base import Actor, Critic, RewardModel
|
||||
from coati.models.generation import generate
|
||||
from coati.models.utils import calc_action_log_probs, compute_reward
|
||||
from transformers import PreTrainedTokenizer
|
||||
|
||||
from .base import Experience, ExperienceMaker
|
||||
|
||||
|
||||
class NaiveExperienceMaker(ExperienceMaker):
|
||||
"""
|
||||
Naive experience maker.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
actor: Actor,
|
||||
critic: Critic,
|
||||
reward_model: RewardModel,
|
||||
initial_model: Actor,
|
||||
tokenizer: PreTrainedTokenizer,
|
||||
kl_coef: float = 0.1,
|
||||
) -> None:
|
||||
super().__init__(actor, critic, reward_model, initial_model)
|
||||
self.tokenizer = tokenizer
|
||||
self.kl_coef = kl_coef
|
||||
|
||||
@torch.no_grad()
|
||||
def make_experience(self, input_ids: torch.Tensor, **generate_kwargs) -> Experience:
|
||||
self.actor.eval()
|
||||
self.critic.eval()
|
||||
self.initial_model.eval()
|
||||
self.reward_model.eval()
|
||||
|
||||
# generate sequences
|
||||
sequences = generate(self.actor, input_ids, self.tokenizer, **generate_kwargs)
|
||||
|
||||
# calculate auxiliary tensors
|
||||
attention_mask = None
|
||||
pad_token_id = self.tokenizer.pad_token_id
|
||||
if pad_token_id is not None:
|
||||
attention_mask = sequences.not_equal(pad_token_id).to(dtype=torch.long, device=sequences.device)
|
||||
|
||||
input_len = input_ids.size(1)
|
||||
eos_token_id = self.tokenizer.eos_token_id
|
||||
if eos_token_id is None:
|
||||
action_mask = torch.ones_like(sequences, dtype=torch.bool)
|
||||
else:
|
||||
# left padding may be applied, only mask action
|
||||
action_mask = (sequences[:, input_len:] == eos_token_id).cumsum(dim=-1) == 0
|
||||
action_mask = F.pad(action_mask, (1 + input_len, -1), value=True) # include eos token and input
|
||||
action_mask[:, :input_len] = False
|
||||
action_mask = action_mask[:, 1:]
|
||||
action_mask = action_mask[:, -(sequences.size(1) - input_len) :]
|
||||
num_actions = action_mask.size(1)
|
||||
|
||||
actor_output = self.actor(sequences, attention_mask)["logits"]
|
||||
action_log_probs = calc_action_log_probs(actor_output, sequences, num_actions)
|
||||
base_model_output = self.initial_model(sequences, attention_mask)["logits"]
|
||||
base_action_log_probs = calc_action_log_probs(base_model_output, sequences, num_actions)
|
||||
value = self.critic(sequences, attention_mask)
|
||||
r = self.reward_model(sequences, attention_mask)
|
||||
reward = compute_reward(r, self.kl_coef, action_log_probs, base_action_log_probs, action_mask=action_mask)
|
||||
|
||||
advantage = reward - value
|
||||
# TODO(ver217): maybe normalize adv
|
||||
if advantage.ndim == 1:
|
||||
advantage = advantage.unsqueeze(-1)
|
||||
|
||||
return Experience(sequences, action_log_probs, value, reward, advantage, attention_mask, action_mask)
|
|
@ -1,6 +0,0 @@
|
|||
from .wrapper import convert_to_xformer_model, recover_from_xformer_model
|
||||
|
||||
__all__ = [
|
||||
"convert_to_xformer_model",
|
||||
"recover_from_xformer_model",
|
||||
]
|
|
@ -1,90 +0,0 @@
|
|||
from typing import Optional, Tuple
|
||||
|
||||
import torch
|
||||
import xformers.ops as xops
|
||||
from torch import Tensor
|
||||
from transformers.models.opt.modeling_opt import OPTAttention
|
||||
|
||||
|
||||
# This is modified from https://github.com/huggingface/transformers/blob/main/src/transformers/models/opt/modeling_opt.py
|
||||
class XOPTAttention(OPTAttention):
|
||||
# def _shape(self, tensor: Tensor, seq_len: int, bsz: int):
|
||||
# return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).contiguous()
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states: Tensor,
|
||||
key_value_states: Optional[Tensor] = None,
|
||||
past_key_value: Optional[Tensor] = None,
|
||||
attention_mask: Optional[Tensor] = None,
|
||||
layer_head_mask: Optional[Tensor] = None,
|
||||
output_attentions: bool = False,
|
||||
) -> Tuple[Tensor, Optional[Tensor], Optional[Tuple[Tensor]]]:
|
||||
if not self.training:
|
||||
return super().forward(
|
||||
hidden_states, key_value_states, past_key_value, attention_mask, layer_head_mask, output_attentions
|
||||
)
|
||||
"""Input shape: Batch x Time x Channel"""
|
||||
assert layer_head_mask is None, "Xformers attention does not support layer_head_mask"
|
||||
assert not output_attentions, "Xformers attention does not support output_attentions"
|
||||
|
||||
# if key_value_states are provided this layer is used as a cross-attention layer
|
||||
# for the decoder
|
||||
is_cross_attention = key_value_states is not None
|
||||
|
||||
bsz, tgt_len, _ = hidden_states.size()
|
||||
|
||||
# get query proj
|
||||
query_states = self.q_proj(hidden_states)
|
||||
# get key, value proj
|
||||
if is_cross_attention and past_key_value is not None:
|
||||
# reuse k,v, cross_attentions
|
||||
key_states = past_key_value[0]
|
||||
value_states = past_key_value[1]
|
||||
elif is_cross_attention:
|
||||
# cross_attentions
|
||||
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
|
||||
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
|
||||
elif past_key_value is not None:
|
||||
# reuse k, v, self_attention
|
||||
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
||||
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
||||
key_states = torch.cat([past_key_value[0], key_states], dim=2)
|
||||
value_states = torch.cat([past_key_value[1], value_states], dim=2)
|
||||
else:
|
||||
# self_attention
|
||||
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
||||
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
||||
|
||||
if self.is_decoder:
|
||||
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
|
||||
# Further calls to cross_attention layer can then reuse all cross-attention
|
||||
# key/value_states (first "if" case)
|
||||
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
|
||||
# all previous decoder key/value_states. Further calls to uni-directional self-attention
|
||||
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
|
||||
# if encoder bi-directional self-attention `past_key_value` is always `None`
|
||||
past_key_value = (key_states, value_states)
|
||||
|
||||
query_states = self._shape(query_states, tgt_len, bsz).transpose(1, 2)
|
||||
key_states = key_states.transpose(1, 2)
|
||||
value_states = value_states.transpose(1, 2)
|
||||
|
||||
attn_output = xops.memory_efficient_attention(
|
||||
query_states,
|
||||
key_states,
|
||||
value_states,
|
||||
attn_bias=xops.LowerTriangularMask(),
|
||||
p=self.dropout if self.training else 0.0,
|
||||
scale=self.scaling,
|
||||
)
|
||||
|
||||
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
|
||||
# partitioned across GPUs when using tensor-parallelism.
|
||||
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
|
||||
|
||||
attn_output = self.out_proj(attn_output)
|
||||
|
||||
attn_weights_reshaped = None
|
||||
|
||||
return attn_output, attn_weights_reshaped, past_key_value
|
|
@ -1,18 +0,0 @@
|
|||
import torch.nn as nn
|
||||
from transformers.models.opt.modeling_opt import OPTAttention
|
||||
|
||||
from .opt_attn import XOPTAttention
|
||||
|
||||
|
||||
def convert_to_xformer_model(model: nn.Module) -> nn.Module:
|
||||
for module in model.modules():
|
||||
if isinstance(module, OPTAttention):
|
||||
module.__class__ = XOPTAttention
|
||||
return model
|
||||
|
||||
|
||||
def recover_from_xformer_model(model: nn.Module) -> nn.Module:
|
||||
for module in model.modules():
|
||||
if isinstance(module, XOPTAttention):
|
||||
module.__class__ = OPTAttention
|
||||
return model
|
|
@ -1,15 +0,0 @@
|
|||
from .base import Actor, Critic, RewardModel
|
||||
from .lora import LoRAModule, convert_to_lora_module
|
||||
from .loss import LogExpLoss, LogSigLoss, PolicyLoss, ValueLoss
|
||||
|
||||
__all__ = [
|
||||
"Actor",
|
||||
"Critic",
|
||||
"RewardModel",
|
||||
"PolicyLoss",
|
||||
"ValueLoss",
|
||||
"LogSigLoss",
|
||||
"LogExpLoss",
|
||||
"LoRAModule",
|
||||
"convert_to_lora_module",
|
||||
]
|
|
@ -1,27 +0,0 @@
|
|||
from typing import Union
|
||||
|
||||
import torch.nn as nn
|
||||
|
||||
from .actor import Actor
|
||||
from .critic import Critic
|
||||
from .reward_model import RewardModel
|
||||
|
||||
|
||||
def get_base_model(model: Union[Actor, Critic, RewardModel]) -> nn.Module:
|
||||
"""Get the base model of our wrapper classes.
|
||||
For Actor, Critic and RewardModel, return ``model.model``,
|
||||
it's usually a ``transformers.PreTrainedModel``.
|
||||
|
||||
Args:
|
||||
model (nn.Module): model to get base model from
|
||||
|
||||
Returns:
|
||||
nn.Module: the base model
|
||||
"""
|
||||
assert isinstance(
|
||||
model, (Actor, Critic, RewardModel)
|
||||
), f"Expect Actor, Critic or RewardModel, got {type(model)}, use unwrap_model first."
|
||||
return model.model
|
||||
|
||||
|
||||
__all__ = ["Actor", "Critic", "RewardModel", "get_base_model"]
|
|
@ -1,33 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from ..lora import LoRAModule
|
||||
|
||||
|
||||
class Actor(LoRAModule):
|
||||
"""
|
||||
Actor model base class.
|
||||
|
||||
Args:
|
||||
model (nn.Module): Actor Model.
|
||||
lora_rank (int): LoRA rank.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(self, model: nn.Module, lora_rank: int = 0, lora_train_bias: str = "none") -> None:
|
||||
super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
|
||||
self.model = model
|
||||
self.convert_to_lora()
|
||||
|
||||
def forward(
|
||||
self,
|
||||
input_ids: torch.LongTensor,
|
||||
attention_mask: Optional[torch.Tensor] = None,
|
||||
**model_kwargs,
|
||||
) -> torch.Tensor:
|
||||
"""Returns model output."""
|
||||
output = self.model(input_ids, attention_mask=attention_mask, **model_kwargs)
|
||||
return output
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from ..lora import LoRAModule
|
||||
|
||||
|
||||
class Critic(LoRAModule):
|
||||
"""
|
||||
Critic model base class.
|
||||
|
||||
Args:
|
||||
model (nn.Module): Critic model.
|
||||
value_head (nn.Module): Value head to get value.
|
||||
lora_rank (int): LoRA rank.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, model: nn.Module, value_head: nn.Module, lora_rank: int = 0, lora_train_bias: str = "none"
|
||||
) -> None:
|
||||
super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
|
||||
self.model = model
|
||||
self.value_head = value_head
|
||||
self.convert_to_lora()
|
||||
|
||||
def forward(self, sequences: torch.LongTensor, attention_mask: torch.Tensor) -> torch.Tensor:
|
||||
outputs = self.model(sequences, attention_mask=attention_mask)
|
||||
last_hidden_states = outputs["last_hidden_state"]
|
||||
sequence_lengths = torch.max(attention_mask * torch.arange(sequences.size(1), device=sequences.device), dim=1)[
|
||||
0
|
||||
]
|
||||
sequence_hidden_states = last_hidden_states[torch.arange(last_hidden_states.size(0)), sequence_lengths]
|
||||
values = self.value_head(sequence_hidden_states).squeeze(1) # ensure shape is (B, )
|
||||
return values
|
|
@ -1,46 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from ..lora import LoRAModule
|
||||
|
||||
|
||||
class RewardModel(LoRAModule):
|
||||
"""
|
||||
Reward model base class.
|
||||
|
||||
Args:
|
||||
model (nn.Module): Reward model.
|
||||
value_head (nn.Module): Value head to get reward score.
|
||||
lora_rank (int): LoRA rank.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: nn.Module,
|
||||
value_head: Optional[nn.Module] = None,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
) -> None:
|
||||
super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
|
||||
self.model = model
|
||||
self.convert_to_lora()
|
||||
|
||||
if value_head is not None:
|
||||
if value_head.out_features != 1:
|
||||
raise ValueError("The value head of reward model's output dim should be 1!")
|
||||
self.value_head = value_head
|
||||
else:
|
||||
self.value_head = nn.Linear(model.config.n_embd, 1)
|
||||
|
||||
def forward(self, sequences: torch.LongTensor, attention_mask: torch.Tensor) -> torch.Tensor:
|
||||
outputs = self.model(sequences, attention_mask=attention_mask)
|
||||
last_hidden_states = outputs["last_hidden_state"]
|
||||
sequence_lengths = torch.max(attention_mask * torch.arange(sequences.size(1), device=sequences.device), dim=1)[
|
||||
0
|
||||
]
|
||||
sequence_hidden_states = last_hidden_states[torch.arange(last_hidden_states.size(0)), sequence_lengths]
|
||||
values = self.value_head(sequence_hidden_states).squeeze(1) # ensure shape is (B, )
|
||||
return values
|
|
@ -1,5 +0,0 @@
|
|||
from .bloom_actor import BLOOMActor
|
||||
from .bloom_critic import BLOOMCritic
|
||||
from .bloom_rm import BLOOMRM
|
||||
|
||||
__all__ = ["BLOOMActor", "BLOOMCritic", "BLOOMRM"]
|
|
@ -1,36 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
from transformers import BloomConfig, BloomForCausalLM
|
||||
|
||||
from ..base import Actor
|
||||
|
||||
|
||||
class BLOOMActor(Actor):
|
||||
"""
|
||||
BLOOM Actor model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (BloomConfig): Model config.
|
||||
checkpoint (bool): Enable gradient checkpointing.
|
||||
lora_rank (int): LoRA rank.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: str = None,
|
||||
config: Optional[BloomConfig] = None,
|
||||
checkpoint: bool = False,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = BloomForCausalLM.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = BloomForCausalLM(config)
|
||||
else:
|
||||
model = BloomForCausalLM(BloomConfig())
|
||||
if checkpoint:
|
||||
model.gradient_checkpointing_enable()
|
||||
super().__init__(model, lora_rank, lora_train_bias)
|
|
@ -1,36 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch.nn as nn
|
||||
from transformers import BloomConfig, BloomModel
|
||||
|
||||
from ..base import Critic
|
||||
|
||||
|
||||
class BLOOMCritic(Critic):
|
||||
"""
|
||||
BLOOM Critic model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (BloomConfig): Model config.
|
||||
lora_rank (int): LoRA rank.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: str = None,
|
||||
config: Optional[BloomConfig] = None,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
**kwargs,
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = BloomModel.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = BloomModel(config)
|
||||
else:
|
||||
model = BloomModel(BloomConfig())
|
||||
|
||||
value_head = nn.Linear(model.config.hidden_size, 1)
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
|
|
@ -1,36 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch.nn as nn
|
||||
from transformers import BloomConfig, BloomModel
|
||||
|
||||
from ..base import RewardModel
|
||||
|
||||
|
||||
class BLOOMRM(RewardModel):
|
||||
"""
|
||||
BLOOM Reward model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (BloomConfig): Model config.
|
||||
lora_rank (int): LoRA rank.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: str = None,
|
||||
config: Optional[BloomConfig] = None,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = BloomModel.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = BloomModel(config)
|
||||
else:
|
||||
model = BloomModel(BloomConfig())
|
||||
|
||||
value_head = nn.Linear(model.config.hidden_size, 1)
|
||||
value_head.weight.data.normal_(mean=0.0, std=1 / (model.config.hidden_size + 1))
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias)
|
|
@ -1,3 +0,0 @@
|
|||
from .chatglm_actor import ChatGLMActor
|
||||
|
||||
__all__ = ["ChatGLMActor"]
|
|
@ -1,31 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
from ..base import Actor
|
||||
from .configuration_chatglm import ChatGLMConfig
|
||||
from .modeling_chatglm import ChatGLMForConditionalGeneration
|
||||
|
||||
|
||||
class ChatGLMActor(Actor):
|
||||
"""
|
||||
ChatGLM Actor model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (ChatGLMConfig): Model config.
|
||||
checkpoint (bool): Enable gradient checkpointing.
|
||||
|
||||
do not support lora for now.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, pretrained: str = None, config: Optional[ChatGLMConfig] = None, checkpoint: bool = False
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = ChatGLMForConditionalGeneration.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = ChatGLMForConditionalGeneration(config)
|
||||
else:
|
||||
model = ChatGLMForConditionalGeneration(ChatGLMConfig())
|
||||
if checkpoint:
|
||||
model.gradient_checkpointing_enable()
|
||||
super().__init__(model, lora_rank=0, lora_train_bias="none")
|
|
@ -1,442 +0,0 @@
|
|||
"""
|
||||
This code is copied from https://huggingface.co/THUDM/chatglm-6b/blob/main/tokenization_chatglm.py
|
||||
"""
|
||||
"""Tokenization classes for ChatGLM."""
|
||||
import os
|
||||
from typing import Dict, List, Optional, Union
|
||||
|
||||
import numpy as np
|
||||
import sentencepiece as spm
|
||||
from transformers.tokenization_utils import PreTrainedTokenizer
|
||||
from transformers.tokenization_utils_base import BatchEncoding, EncodedInput
|
||||
from transformers.utils import PaddingStrategy, logging
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
|
||||
"THUDM/chatglm-6b": 2048,
|
||||
}
|
||||
|
||||
|
||||
class TextTokenizer:
|
||||
def __init__(self, model_path):
|
||||
self.sp = spm.SentencePieceProcessor()
|
||||
self.sp.Load(model_path)
|
||||
self.num_tokens = self.sp.vocab_size()
|
||||
|
||||
def encode(self, text):
|
||||
return self.sp.EncodeAsIds(text)
|
||||
|
||||
def decode(self, ids: List[int]):
|
||||
return self.sp.DecodeIds(ids)
|
||||
|
||||
def tokenize(self, text):
|
||||
return self.sp.EncodeAsPieces(text)
|
||||
|
||||
def convert_tokens_to_string(self, tokens):
|
||||
return self.sp.DecodePieces(tokens)
|
||||
|
||||
def convert_tokens_to_ids(self, tokens):
|
||||
return [self.sp.PieceToId(token) for token in tokens]
|
||||
|
||||
def convert_token_to_id(self, token):
|
||||
return self.sp.PieceToId(token)
|
||||
|
||||
def convert_id_to_token(self, idx):
|
||||
return self.sp.IdToPiece(idx)
|
||||
|
||||
def __len__(self):
|
||||
return self.num_tokens
|
||||
|
||||
|
||||
class SPTokenizer:
|
||||
def __init__(
|
||||
self,
|
||||
vocab_file,
|
||||
num_image_tokens=20000,
|
||||
max_blank_length=80,
|
||||
byte_fallback=True,
|
||||
):
|
||||
assert vocab_file is not None
|
||||
self.vocab_file = vocab_file
|
||||
self.num_image_tokens = num_image_tokens
|
||||
self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "<unused_0>", "<sop>", "<eop>", "<ENC>", "<dBLOCK>"]
|
||||
self.max_blank_length = max_blank_length
|
||||
self.byte_fallback = byte_fallback
|
||||
self.text_tokenizer = TextTokenizer(vocab_file)
|
||||
|
||||
def _get_text_tokenizer(self):
|
||||
return self.text_tokenizer
|
||||
|
||||
@staticmethod
|
||||
def get_blank_token(length: int):
|
||||
assert length >= 2
|
||||
return f"<|blank_{length}|>"
|
||||
|
||||
@staticmethod
|
||||
def get_tab_token():
|
||||
return f"<|tab|>"
|
||||
|
||||
@property
|
||||
def num_text_tokens(self):
|
||||
return self.text_tokenizer.num_tokens
|
||||
|
||||
@property
|
||||
def num_tokens(self):
|
||||
return self.num_image_tokens + self.num_text_tokens
|
||||
|
||||
@staticmethod
|
||||
def _encode_whitespaces(text: str, max_len: int = 80):
|
||||
text = text.replace("\t", SPTokenizer.get_tab_token())
|
||||
for i in range(max_len, 1, -1):
|
||||
text = text.replace(" " * i, SPTokenizer.get_blank_token(i))
|
||||
return text
|
||||
|
||||
def _preprocess(self, text: str, linebreak=True, whitespaces=True):
|
||||
if linebreak:
|
||||
text = text.replace("\n", "<n>")
|
||||
if whitespaces:
|
||||
text = self._encode_whitespaces(text, max_len=self.max_blank_length)
|
||||
return text
|
||||
|
||||
def encode(self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True) -> List[int]:
|
||||
"""
|
||||
@param text: Text to encode.
|
||||
@param linebreak: Whether to encode newline (\n) in text.
|
||||
@param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
|
||||
@param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
|
||||
@param add_dummy_prefix: Whether to add dummy blank space in the beginning.
|
||||
"""
|
||||
text = self._preprocess(text, linebreak, whitespaces)
|
||||
if not add_dummy_prefix:
|
||||
text = "<n>" + text
|
||||
tmp = self._get_text_tokenizer().encode(text)
|
||||
tokens = [x + self.num_image_tokens for x in tmp]
|
||||
return tokens if add_dummy_prefix else tokens[2:]
|
||||
|
||||
def postprocess(self, text):
|
||||
text = text.replace("<n>", "\n")
|
||||
text = text.replace(SPTokenizer.get_tab_token(), "\t")
|
||||
for i in range(2, self.max_blank_length + 1):
|
||||
text = text.replace(self.get_blank_token(i), " " * i)
|
||||
return text
|
||||
|
||||
def decode(self, text_ids: List[int]) -> str:
|
||||
ids = [int(_id) - self.num_image_tokens for _id in text_ids]
|
||||
ids = [_id for _id in ids if _id >= 0]
|
||||
text = self._get_text_tokenizer().decode(ids)
|
||||
text = self.postprocess(text)
|
||||
return text
|
||||
|
||||
def decode_tokens(self, tokens: List[str]) -> str:
|
||||
text = self._get_text_tokenizer().convert_tokens_to_string(tokens)
|
||||
text = self.postprocess(text)
|
||||
return text
|
||||
|
||||
def tokenize(self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True) -> List[str]:
|
||||
"""
|
||||
@param text: Text to encode.
|
||||
@param linebreak: Whether to encode newline (\n) in text.
|
||||
@param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
|
||||
@param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
|
||||
@param add_dummy_prefix: Whether to add dummy blank space in the beginning.
|
||||
"""
|
||||
text = self._preprocess(text, linebreak, whitespaces)
|
||||
if not add_dummy_prefix:
|
||||
text = "<n>" + text
|
||||
tokens = self._get_text_tokenizer().tokenize(text)
|
||||
return tokens if add_dummy_prefix else tokens[2:]
|
||||
|
||||
def __getitem__(self, x: Union[int, str]):
|
||||
if isinstance(x, int):
|
||||
if x < self.num_image_tokens:
|
||||
return "<image_{}>".format(x)
|
||||
else:
|
||||
return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
|
||||
elif isinstance(x, str):
|
||||
if x.startswith("<image_") and x.endswith(">") and x[7:-1].isdigit():
|
||||
return int(x[7:-1])
|
||||
else:
|
||||
return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
|
||||
else:
|
||||
raise ValueError("The key should be str or int.")
|
||||
|
||||
|
||||
class ChatGLMTokenizer(PreTrainedTokenizer):
|
||||
"""
|
||||
Construct a ChatGLM tokenizer. Based on byte-level Byte-Pair-Encoding.
|
||||
|
||||
Args:
|
||||
vocab_file (`str`):
|
||||
Path to the vocabulary file.
|
||||
"""
|
||||
|
||||
vocab_files_names = {"vocab_file": "ice_text.model"}
|
||||
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
|
||||
model_input_names = ["input_ids", "attention_mask", "position_ids"]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vocab_file,
|
||||
do_lower_case=False,
|
||||
remove_space=False,
|
||||
bos_token="<sop>",
|
||||
eos_token="<eop>",
|
||||
end_token="</s>",
|
||||
mask_token="[MASK]",
|
||||
gmask_token="[gMASK]",
|
||||
padding_side="left",
|
||||
pad_token="<pad>",
|
||||
unk_token="<unk>",
|
||||
num_image_tokens=20000,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
super().__init__(
|
||||
do_lower_case=do_lower_case,
|
||||
remove_space=remove_space,
|
||||
padding_side=padding_side,
|
||||
bos_token=bos_token,
|
||||
eos_token=eos_token,
|
||||
end_token=end_token,
|
||||
mask_token=mask_token,
|
||||
gmask_token=gmask_token,
|
||||
pad_token=pad_token,
|
||||
unk_token=unk_token,
|
||||
num_image_tokens=num_image_tokens,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
self.do_lower_case = do_lower_case
|
||||
self.remove_space = remove_space
|
||||
self.vocab_file = vocab_file
|
||||
|
||||
self.bos_token = bos_token
|
||||
self.eos_token = eos_token
|
||||
self.end_token = end_token
|
||||
self.mask_token = mask_token
|
||||
self.gmask_token = gmask_token
|
||||
|
||||
self.sp_tokenizer = SPTokenizer(vocab_file, num_image_tokens=num_image_tokens)
|
||||
|
||||
""" Initialisation """
|
||||
|
||||
@property
|
||||
def gmask_token_id(self) -> Optional[int]:
|
||||
if self.gmask_token is None:
|
||||
return None
|
||||
return self.convert_tokens_to_ids(self.gmask_token)
|
||||
|
||||
@property
|
||||
def end_token_id(self) -> Optional[int]:
|
||||
"""
|
||||
`Optional[int]`: Id of the end of context token in the vocabulary. Returns `None` if the token has not been
|
||||
set.
|
||||
"""
|
||||
if self.end_token is None:
|
||||
return None
|
||||
return self.convert_tokens_to_ids(self.end_token)
|
||||
|
||||
@property
|
||||
def vocab_size(self):
|
||||
"""Returns vocab size"""
|
||||
return self.sp_tokenizer.num_tokens
|
||||
|
||||
def get_vocab(self):
|
||||
"""Returns vocab as a dict"""
|
||||
vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
|
||||
vocab.update(self.added_tokens_encoder)
|
||||
return vocab
|
||||
|
||||
def preprocess_text(self, inputs):
|
||||
if self.remove_space:
|
||||
outputs = " ".join(inputs.strip().split())
|
||||
else:
|
||||
outputs = inputs
|
||||
|
||||
if self.do_lower_case:
|
||||
outputs = outputs.lower()
|
||||
|
||||
return outputs
|
||||
|
||||
def _tokenize(self, text, **kwargs):
|
||||
"""Returns a tokenized string."""
|
||||
text = self.preprocess_text(text)
|
||||
|
||||
seq = self.sp_tokenizer.tokenize(text)
|
||||
|
||||
return seq
|
||||
|
||||
def convert_tokens_to_string(self, tokens: List[str]) -> str:
|
||||
return self.sp_tokenizer.decode_tokens(tokens)
|
||||
|
||||
def _decode(self, token_ids: Union[int, List[int]], **kwargs) -> str:
|
||||
if isinstance(token_ids, int):
|
||||
token_ids = [token_ids]
|
||||
if len(token_ids) == 0:
|
||||
return ""
|
||||
if self.pad_token_id in token_ids: # remove pad
|
||||
token_ids = list(filter((self.pad_token_id).__ne__, token_ids))
|
||||
return super()._decode(token_ids, **kwargs)
|
||||
|
||||
def _convert_token_to_id(self, token):
|
||||
"""Converts a token (str) in an id using the vocab."""
|
||||
return self.sp_tokenizer[token]
|
||||
|
||||
def _convert_id_to_token(self, index):
|
||||
"""Converts an index (integer) in a token (str) using the vocab."""
|
||||
return self.sp_tokenizer[index]
|
||||
|
||||
def save_vocabulary(self, save_directory, filename_prefix=None):
|
||||
"""
|
||||
Save the vocabulary and special tokens file to a directory.
|
||||
|
||||
Args:
|
||||
save_directory (`str`):
|
||||
The directory in which to save the vocabulary.
|
||||
filename_prefix (`str`, *optional*):
|
||||
An optional prefix to add to the named of the saved files.
|
||||
|
||||
Returns:
|
||||
`Tuple(str)`: Paths to the files saved.
|
||||
"""
|
||||
if os.path.isdir(save_directory):
|
||||
vocab_file = os.path.join(save_directory, self.vocab_files_names["vocab_file"])
|
||||
else:
|
||||
vocab_file = save_directory
|
||||
|
||||
with open(self.vocab_file, "rb") as fin:
|
||||
proto_str = fin.read()
|
||||
|
||||
with open(vocab_file, "wb") as writer:
|
||||
writer.write(proto_str)
|
||||
|
||||
return (vocab_file,)
|
||||
|
||||
def build_inputs_with_special_tokens(
|
||||
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
||||
) -> List[int]:
|
||||
"""
|
||||
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
||||
adding special tokens. A BERT sequence has the following format:
|
||||
|
||||
- single sequence: `[CLS] X [SEP]`
|
||||
- pair of sequences: `[CLS] A [SEP] B [SEP]`
|
||||
|
||||
Args:
|
||||
token_ids_0 (`List[int]`):
|
||||
List of IDs to which the special tokens will be added.
|
||||
token_ids_1 (`List[int]`, *optional*):
|
||||
Optional second list of IDs for sequence pairs.
|
||||
|
||||
Returns:
|
||||
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
||||
"""
|
||||
gmask_id = self.sp_tokenizer[self.gmask_token]
|
||||
self.sp_tokenizer[self.eos_token]
|
||||
token_ids_0 = token_ids_0 + [gmask_id, self.sp_tokenizer[self.bos_token]]
|
||||
if token_ids_1 is not None:
|
||||
token_ids_0 = token_ids_0 + token_ids_1
|
||||
return token_ids_0
|
||||
|
||||
def _pad(
|
||||
self,
|
||||
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
|
||||
max_length: Optional[int] = None,
|
||||
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
|
||||
pad_to_multiple_of: Optional[int] = None,
|
||||
return_attention_mask: Optional[bool] = None,
|
||||
) -> dict:
|
||||
"""
|
||||
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
|
||||
|
||||
Args:
|
||||
encoded_inputs:
|
||||
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
|
||||
max_length: maximum length of the returned list and optionally padding length (see below).
|
||||
Will truncate by taking into account the special tokens.
|
||||
padding_strategy: PaddingStrategy to use for padding.
|
||||
|
||||
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
|
||||
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
|
||||
- PaddingStrategy.DO_NOT_PAD: Do not pad
|
||||
The tokenizer padding sides are defined in self.padding_side:
|
||||
|
||||
- 'left': pads on the left of the sequences
|
||||
- 'right': pads on the right of the sequences
|
||||
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
|
||||
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
|
||||
`>= 7.5` (Volta).
|
||||
return_attention_mask:
|
||||
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
|
||||
"""
|
||||
# Load from model defaults
|
||||
bos_token_id = self.sp_tokenizer[self.bos_token]
|
||||
mask_token_id = self.sp_tokenizer[self.mask_token]
|
||||
gmask_token_id = self.sp_tokenizer[self.gmask_token]
|
||||
assert self.padding_side == "left"
|
||||
|
||||
required_input = encoded_inputs[self.model_input_names[0]]
|
||||
seq_length = len(required_input)
|
||||
|
||||
if padding_strategy == PaddingStrategy.LONGEST:
|
||||
max_length = len(required_input)
|
||||
|
||||
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
|
||||
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
|
||||
|
||||
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
|
||||
|
||||
# Initialize attention mask if not present.
|
||||
if max_length is not None:
|
||||
if "attention_mask" not in encoded_inputs:
|
||||
if bos_token_id in required_input:
|
||||
context_length = required_input.index(bos_token_id)
|
||||
else:
|
||||
context_length = seq_length
|
||||
attention_mask = np.ones((1, seq_length, seq_length))
|
||||
attention_mask = np.tril(attention_mask)
|
||||
attention_mask[:, :, :context_length] = 1
|
||||
attention_mask = np.bool_(attention_mask < 0.5)
|
||||
encoded_inputs["attention_mask"] = attention_mask
|
||||
|
||||
if "position_ids" not in encoded_inputs:
|
||||
if bos_token_id in required_input:
|
||||
context_length = required_input.index(bos_token_id)
|
||||
else:
|
||||
context_length = seq_length
|
||||
position_ids = np.arange(seq_length, dtype=np.int64)
|
||||
mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id
|
||||
if mask_token in required_input:
|
||||
mask_position = required_input.index(mask_token)
|
||||
position_ids[context_length:] = mask_position
|
||||
block_position_ids = np.concatenate(
|
||||
[
|
||||
np.zeros(context_length, dtype=np.int64),
|
||||
np.arange(1, seq_length - context_length + 1, dtype=np.int64),
|
||||
]
|
||||
)
|
||||
encoded_inputs["position_ids"] = np.stack([position_ids, block_position_ids], axis=0)
|
||||
|
||||
if needs_to_be_padded:
|
||||
difference = max_length - len(required_input)
|
||||
|
||||
if "attention_mask" in encoded_inputs:
|
||||
encoded_inputs["attention_mask"] = np.pad(
|
||||
encoded_inputs["attention_mask"],
|
||||
pad_width=[(0, 0), (difference, 0), (difference, 0)],
|
||||
mode="constant",
|
||||
constant_values=True,
|
||||
)
|
||||
if "token_type_ids" in encoded_inputs:
|
||||
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
|
||||
"token_type_ids"
|
||||
]
|
||||
if "special_tokens_mask" in encoded_inputs:
|
||||
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
|
||||
if "position_ids" in encoded_inputs:
|
||||
encoded_inputs["position_ids"] = np.pad(
|
||||
encoded_inputs["position_ids"], pad_width=[(0, 0), (difference, 0)]
|
||||
)
|
||||
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
|
||||
|
||||
return encoded_inputs
|
|
@ -1,101 +0,0 @@
|
|||
"""
|
||||
This code is copied from https://huggingface.co/THUDM/chatglm-6b/resolve/main/configuration_chatglm.py
|
||||
"""
|
||||
|
||||
""" ChatGLM model configuration """
|
||||
|
||||
from transformers.configuration_utils import PretrainedConfig
|
||||
from transformers.utils import logging
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
class ChatGLMConfig(PretrainedConfig):
|
||||
r"""
|
||||
This is the configuration class to store the configuration of a [`~ChatGLMModel`].
|
||||
It is used to instantiate an ChatGLM model according to the specified arguments, defining the model
|
||||
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
|
||||
the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture.
|
||||
|
||||
Configuration objects inherit from [`PretrainedConfig`] and can be used
|
||||
to control the model outputs. Read the documentation from [`PretrainedConfig`]
|
||||
for more information.
|
||||
|
||||
|
||||
Args:
|
||||
vocab_size (`int`, *optional*, defaults to 150528):
|
||||
Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be represented by the
|
||||
`inputs_ids` passed when calling [`~ChatGLMModel`] or
|
||||
[`~TFChatGLMModel`].
|
||||
hidden_size (`int`, *optional*, defaults to 4096):
|
||||
Dimension of the encoder layers and the pooler layer.
|
||||
num_hidden_layers (`int`, *optional*, defaults to 28):
|
||||
Number of hidden layers in the Transformer encoder.
|
||||
num_attention_heads (`int`, *optional*, defaults to 32):
|
||||
Number of attention heads for each attention layer in the Transformer encoder.
|
||||
inner_hidden_size (`int`, *optional*, defaults to 16384):
|
||||
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
|
||||
max_sequence_length (`int`, *optional*, defaults to 512):
|
||||
The maximum sequence length that this model might ever be used with.
|
||||
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
|
||||
layernorm_epsilon (`float`, *optional*, defaults to 1e-5):
|
||||
The epsilon used by the layer normalization layers.
|
||||
use_cache (`bool`, *optional*, defaults to `True`):
|
||||
Whether the model should return the last key/values attentions (not used by all models).
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> from configuration_chatglm import ChatGLMConfig
|
||||
>>> from modeling_chatglm import ChatGLMModel
|
||||
|
||||
>>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration
|
||||
>>> configuration = ChatGLMConfig()
|
||||
|
||||
>>> # Initializing a model from the THUDM/ChatGLM-6B style configuration
|
||||
>>> model = ChatGLMModel(configuration)
|
||||
|
||||
>>> # Accessing the model configuration
|
||||
>>> configuration = model.config
|
||||
```"""
|
||||
model_type = "chatglm"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vocab_size=130528,
|
||||
hidden_size=4096,
|
||||
num_layers=28,
|
||||
num_attention_heads=32,
|
||||
layernorm_epsilon=1e-5,
|
||||
use_cache=True,
|
||||
bos_token_id=130004,
|
||||
eos_token_id=130005,
|
||||
mask_token_id=130000,
|
||||
gmask_token_id=130001,
|
||||
pad_token_id=3,
|
||||
max_sequence_length=2048,
|
||||
inner_hidden_size=16384,
|
||||
position_encoding_2d=True,
|
||||
quantization_bit=0,
|
||||
pre_seq_len=None,
|
||||
prefix_projection=False,
|
||||
**kwargs,
|
||||
):
|
||||
self.num_layers = num_layers
|
||||
self.vocab_size = vocab_size
|
||||
self.hidden_size = hidden_size
|
||||
self.num_attention_heads = num_attention_heads
|
||||
self.max_sequence_length = max_sequence_length
|
||||
self.layernorm_epsilon = layernorm_epsilon
|
||||
self.inner_hidden_size = inner_hidden_size
|
||||
self.use_cache = use_cache
|
||||
self.bos_token_id = bos_token_id
|
||||
self.eos_token_id = eos_token_id
|
||||
self.pad_token_id = pad_token_id
|
||||
self.mask_token_id = mask_token_id
|
||||
self.gmask_token_id = gmask_token_id
|
||||
self.position_encoding_2d = position_encoding_2d
|
||||
self.quantization_bit = quantization_bit
|
||||
self.pre_seq_len = pre_seq_len
|
||||
self.prefix_projection = prefix_projection
|
||||
|
||||
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
|
File diff suppressed because it is too large
Load Diff
|
@ -1,152 +0,0 @@
|
|||
from typing import Any, Callable, Optional
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from transformers import PreTrainedTokenizer
|
||||
|
||||
from .base import Actor
|
||||
|
||||
try:
|
||||
from transformers.generation_logits_process import (
|
||||
LogitsProcessorList,
|
||||
TemperatureLogitsWarper,
|
||||
TopKLogitsWarper,
|
||||
TopPLogitsWarper,
|
||||
)
|
||||
except ImportError:
|
||||
from transformers.generation import LogitsProcessorList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper
|
||||
|
||||
|
||||
def _prepare_logits_processor(
|
||||
top_k: Optional[int] = None, top_p: Optional[float] = None, temperature: Optional[float] = None
|
||||
) -> LogitsProcessorList:
|
||||
processor_list = LogitsProcessorList()
|
||||
if temperature is not None and temperature != 1.0:
|
||||
processor_list.append(TemperatureLogitsWarper(temperature))
|
||||
if top_k is not None and top_k != 0:
|
||||
processor_list.append(TopKLogitsWarper(top_k))
|
||||
if top_p is not None and top_p < 1.0:
|
||||
processor_list.append(TopPLogitsWarper(top_p))
|
||||
return processor_list
|
||||
|
||||
|
||||
def _is_sequence_finished(unfinished_sequences: torch.Tensor) -> bool:
|
||||
if dist.is_initialized() and dist.get_world_size() > 1:
|
||||
# consider DP
|
||||
unfinished_sequences = unfinished_sequences.clone()
|
||||
dist.all_reduce(unfinished_sequences)
|
||||
return unfinished_sequences.max() == 0
|
||||
|
||||
|
||||
def _sample(
|
||||
model: Actor,
|
||||
input_ids: torch.Tensor,
|
||||
max_length: int,
|
||||
early_stopping: bool = False,
|
||||
eos_token_id: Optional[int] = None,
|
||||
pad_token_id: Optional[int] = None,
|
||||
top_k: Optional[int] = None,
|
||||
top_p: Optional[float] = None,
|
||||
temperature: Optional[float] = None,
|
||||
prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None,
|
||||
update_model_kwargs_fn: Optional[Callable[[dict, Any], dict]] = None,
|
||||
**model_kwargs,
|
||||
) -> torch.Tensor:
|
||||
if input_ids.size(1) >= max_length:
|
||||
return input_ids
|
||||
|
||||
logits_processor = _prepare_logits_processor(top_k, top_p, temperature)
|
||||
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
|
||||
|
||||
for _ in range(input_ids.size(1), max_length):
|
||||
model_inputs = (
|
||||
prepare_inputs_fn(input_ids, **model_kwargs) if prepare_inputs_fn is not None else {"input_ids": input_ids}
|
||||
)
|
||||
outputs = model(**model_inputs)
|
||||
|
||||
# NOTE: this is correct only in left padding mode
|
||||
next_token_logits = outputs["logits"][:, -1, :]
|
||||
next_token_logits = logits_processor(input_ids, next_token_logits)
|
||||
# sample
|
||||
probs = torch.softmax(next_token_logits, dim=-1, dtype=torch.float)
|
||||
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
|
||||
|
||||
# finished sentences should have their next token be a padding token
|
||||
if eos_token_id is not None:
|
||||
assert pad_token_id is not None, "If `eos_token_id` is defined, make sure that `pad_token_id` is defined."
|
||||
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
|
||||
|
||||
# update generated ids, model inputs for next step
|
||||
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
|
||||
if update_model_kwargs_fn is not None:
|
||||
model_kwargs = update_model_kwargs_fn(outputs, model_kwargs)
|
||||
|
||||
# if eos_token was found in one sentence, set sentence to finished
|
||||
if eos_token_id is not None:
|
||||
unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long())
|
||||
|
||||
# stop when each sentence is finished if early_stopping=True
|
||||
if early_stopping and _is_sequence_finished(unfinished_sequences):
|
||||
break
|
||||
|
||||
return input_ids
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
def generate(
|
||||
model: Actor,
|
||||
input_ids: torch.Tensor,
|
||||
tokenizer: PreTrainedTokenizer,
|
||||
max_length: int,
|
||||
num_beams: int = 1,
|
||||
do_sample: bool = True,
|
||||
early_stopping: bool = False,
|
||||
top_k: Optional[int] = None,
|
||||
top_p: Optional[float] = None,
|
||||
temperature: Optional[float] = None,
|
||||
prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None,
|
||||
update_model_kwargs_fn: Optional[Callable[[dict, Any], dict]] = None,
|
||||
**model_kwargs,
|
||||
) -> torch.Tensor:
|
||||
"""Generate token sequence. The returned sequence is input_ids + generated_tokens.
|
||||
|
||||
Args:
|
||||
model (nn.Module): model
|
||||
input_ids (torch.Tensor): input sequence
|
||||
max_length (int): max length of the returned sequence
|
||||
num_beams (int, optional): number of beams. Defaults to 1.
|
||||
do_sample (bool, optional): whether to do sample. Defaults to True.
|
||||
early_stopping (bool, optional): if True, the sequence length may be smaller than max_length due to finding eos. Defaults to False.
|
||||
top_k (Optional[int], optional): the number of highest probability vocabulary tokens to keep for top-k-filtering. Defaults to None.
|
||||
top_p (Optional[float], optional): If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. Defaults to None.
|
||||
temperature (Optional[float], optional): The value used to module the next token probabilities. Defaults to None.
|
||||
prepare_inputs_fn (Optional[Callable[[torch.Tensor, Any], dict]], optional): Function to preprocess model inputs. Arguments of this function should be input_ids and model_kwargs. Defaults to None.
|
||||
update_model_kwargs_fn (Optional[Callable[[dict, Any], dict]], optional): Function to update model_kwargs based on outputs. Arguments of this function should be outputs and model_kwargs. Defaults to None.
|
||||
"""
|
||||
assert tokenizer.padding_side == "left", "Current generation only supports left padding."
|
||||
is_greedy_gen_mode = (num_beams == 1) and do_sample is False
|
||||
is_sample_gen_mode = (num_beams == 1) and do_sample is True
|
||||
is_beam_gen_mode = (num_beams > 1) and do_sample is False
|
||||
if is_greedy_gen_mode:
|
||||
# run greedy search
|
||||
raise NotImplementedError
|
||||
elif is_sample_gen_mode:
|
||||
# run sample
|
||||
return _sample(
|
||||
model,
|
||||
input_ids,
|
||||
max_length,
|
||||
early_stopping=early_stopping,
|
||||
eos_token_id=tokenizer.eos_token_id,
|
||||
pad_token_id=tokenizer.pad_token_id,
|
||||
top_k=top_k,
|
||||
top_p=top_p,
|
||||
temperature=temperature,
|
||||
prepare_inputs_fn=prepare_inputs_fn,
|
||||
update_model_kwargs_fn=update_model_kwargs_fn,
|
||||
**model_kwargs,
|
||||
)
|
||||
elif is_beam_gen_mode:
|
||||
raise NotImplementedError
|
||||
else:
|
||||
raise ValueError("Unsupported generation mode")
|
|
@ -1,5 +0,0 @@
|
|||
from .gpt_actor import GPTActor
|
||||
from .gpt_critic import GPTCritic
|
||||
from .gpt_rm import GPTRM
|
||||
|
||||
__all__ = ["GPTActor", "GPTCritic", "GPTRM"]
|
|
@ -1,38 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
|
||||
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
|
||||
|
||||
from ..base import Actor
|
||||
|
||||
|
||||
class GPTActor(Actor):
|
||||
"""
|
||||
GPT Actor model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (GPT2Config): Model config.
|
||||
checkpoint (bool): Enable gradient checkpointing.
|
||||
lora_rank (int): Rank of the LoRa layer.
|
||||
lora_train_bias (str): Bias training strategy for the LoRa layer.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: Optional[str] = None,
|
||||
config: Optional[GPT2Config] = None,
|
||||
checkpoint: bool = False,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
**kwargs,
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = GPT2LMHeadModel.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = GPT2LMHeadModel(config)
|
||||
else:
|
||||
model = GPT2LMHeadModel(GPT2Config())
|
||||
if checkpoint:
|
||||
model.gradient_checkpointing_enable()
|
||||
super().__init__(model, lora_rank, lora_train_bias, **kwargs)
|
|
@ -1,37 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch.nn as nn
|
||||
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
|
||||
from transformers.models.gpt2.modeling_gpt2 import GPT2Model
|
||||
|
||||
from ..base import Critic
|
||||
|
||||
|
||||
class GPTCritic(Critic):
|
||||
"""
|
||||
GPT Critic model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (GPT2Config): Model config.
|
||||
lora_rank (int): Rank of the LO-RA decomposition.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: Optional[str] = None,
|
||||
config: Optional[GPT2Config] = None,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
**kwargs,
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = GPT2Model.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = GPT2Model(config)
|
||||
else:
|
||||
model = GPT2Model(GPT2Config())
|
||||
|
||||
value_head = nn.Linear(model.config.n_embd, 1)
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
|
|
@ -1,37 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch.nn as nn
|
||||
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
|
||||
from transformers.models.gpt2.modeling_gpt2 import GPT2Model
|
||||
|
||||
from ..base import RewardModel
|
||||
|
||||
|
||||
class GPTRM(RewardModel):
|
||||
"""
|
||||
GPT Reward model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (GPT2Config): Model config.
|
||||
lora_rank (int): Rank of the low-rank approximation.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: Optional[str] = None,
|
||||
config: Optional[GPT2Config] = None,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = GPT2Model.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = GPT2Model(config)
|
||||
else:
|
||||
model = GPT2Model(GPT2Config())
|
||||
|
||||
value_head = nn.Linear(model.config.n_embd, 1)
|
||||
value_head.weight.data.normal_(mean=0.0, std=1 / (model.config.n_embd + 1))
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias)
|
|
@ -1,5 +0,0 @@
|
|||
from .llama_actor import LlamaActor
|
||||
from .llama_critic import LlamaCritic
|
||||
from .llama_rm import LlamaRM
|
||||
|
||||
__all__ = ["LlamaActor", "LlamaCritic", "LlamaRM"]
|
|
@ -1,38 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
from transformers import LlamaConfig, LlamaForCausalLM
|
||||
|
||||
from ..base import Actor
|
||||
|
||||
|
||||
class LlamaActor(Actor):
|
||||
"""
|
||||
Llama Actor model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (LlamaConfig): Model config.
|
||||
checkpoint (bool): Enable gradient checkpointing.
|
||||
lora_rank (int): LoRA rank.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: Optional[str] = None,
|
||||
config: Optional[LlamaConfig] = None,
|
||||
checkpoint: bool = False,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = LlamaForCausalLM.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = LlamaForCausalLM(config)
|
||||
else:
|
||||
model = LlamaForCausalLM(LlamaConfig())
|
||||
|
||||
if checkpoint:
|
||||
model.gradient_checkpointing_enable()
|
||||
|
||||
super().__init__(model, lora_rank, lora_train_bias)
|
|
@ -1,36 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch.nn as nn
|
||||
from transformers import LlamaConfig, LlamaModel
|
||||
|
||||
from ..base import Critic
|
||||
|
||||
|
||||
class LlamaCritic(Critic):
|
||||
"""
|
||||
Llama Critic model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (LlamaConfig): Model config.
|
||||
lora_rank (int): LoRA rank.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: Optional[str] = None,
|
||||
config: Optional[LlamaConfig] = None,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
**kwargs,
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = LlamaModel.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = LlamaModel(config)
|
||||
else:
|
||||
model = LlamaModel(LlamaConfig())
|
||||
|
||||
value_head = nn.Linear(model.config.hidden_size, 1)
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
|
|
@ -1,37 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch.nn as nn
|
||||
from transformers import LlamaConfig, LlamaModel
|
||||
|
||||
from ..base import RewardModel
|
||||
|
||||
|
||||
class LlamaRM(RewardModel):
|
||||
"""
|
||||
Llama Reward model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (LlamaConfig): Model config.
|
||||
lora_rank (int): LoRA rank.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: Optional[str] = None,
|
||||
config: Optional[LlamaConfig] = None,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = LlamaModel.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = LlamaModel(config)
|
||||
else:
|
||||
model = LlamaModel(LlamaConfig())
|
||||
|
||||
value_head = nn.Linear(model.config.hidden_size, 1)
|
||||
value_head.weight.data.normal_(mean=0.0, std=1 / (model.config.hidden_size + 1))
|
||||
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias)
|
|
@ -1,97 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from .utils import masked_mean
|
||||
|
||||
|
||||
class GPTLMLoss(nn.Module):
|
||||
"""
|
||||
GPT Language Model Loss
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
# NOTE: default ignore_index is -100, which is equal to IGNORE_INDEX in sft_dataset.py
|
||||
self.loss = nn.CrossEntropyLoss()
|
||||
|
||||
def forward(self, logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
|
||||
shift_logits = logits[..., :-1, :].contiguous()
|
||||
shift_labels = labels[..., 1:].contiguous()
|
||||
# Flatten the tokens
|
||||
return self.loss(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
||||
|
||||
|
||||
class PolicyLoss(nn.Module):
|
||||
"""
|
||||
Policy Loss for PPO
|
||||
"""
|
||||
|
||||
def __init__(self, clip_eps: float = 0.2) -> None:
|
||||
super().__init__()
|
||||
self.clip_eps = clip_eps
|
||||
|
||||
def forward(
|
||||
self,
|
||||
log_probs: torch.Tensor,
|
||||
old_log_probs: torch.Tensor,
|
||||
advantages: torch.Tensor,
|
||||
action_mask: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
ratio = (log_probs - old_log_probs).exp()
|
||||
surr1 = ratio * advantages
|
||||
surr2 = ratio.clamp(1 - self.clip_eps, 1 + self.clip_eps) * advantages
|
||||
loss = -torch.min(surr1, surr2)
|
||||
if action_mask is not None:
|
||||
loss = masked_mean(loss, action_mask)
|
||||
loss = loss.mean()
|
||||
return loss
|
||||
|
||||
|
||||
class ValueLoss(nn.Module):
|
||||
"""
|
||||
Value Loss for PPO
|
||||
"""
|
||||
|
||||
def __init__(self, clip_eps: float = 0.4) -> None:
|
||||
super().__init__()
|
||||
self.clip_eps = clip_eps
|
||||
|
||||
def forward(
|
||||
self,
|
||||
values: torch.Tensor,
|
||||
old_values: torch.Tensor,
|
||||
reward: torch.Tensor,
|
||||
action_mask: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
values_clipped = old_values + (values - old_values).clamp(-self.clip_eps, self.clip_eps)
|
||||
surr1 = (values_clipped - reward) ** 2
|
||||
surr2 = (values - reward) ** 2
|
||||
loss = torch.max(surr1, surr2)
|
||||
loss = loss.mean()
|
||||
return 0.5 * loss
|
||||
|
||||
|
||||
class LogSigLoss(nn.Module):
|
||||
"""
|
||||
Pairwise Loss for Reward Model
|
||||
Details: https://arxiv.org/abs/2203.02155
|
||||
"""
|
||||
|
||||
def forward(self, chosen_reward: torch.Tensor, reject_reward: torch.Tensor) -> torch.Tensor:
|
||||
probs = torch.sigmoid(chosen_reward - reject_reward)
|
||||
log_probs = torch.log(probs)
|
||||
loss = -log_probs.mean()
|
||||
return loss
|
||||
|
||||
|
||||
class LogExpLoss(nn.Module):
|
||||
"""
|
||||
Pairwise Loss for Reward Model
|
||||
Details: https://arxiv.org/abs/2204.05862
|
||||
"""
|
||||
|
||||
def forward(self, chosen_reward: torch.Tensor, reject_reward: torch.Tensor) -> torch.Tensor:
|
||||
loss = torch.log(1 + torch.exp(reject_reward - chosen_reward)).mean()
|
||||
return loss
|
|
@ -1,5 +0,0 @@
|
|||
from .opt_actor import OPTActor
|
||||
from .opt_critic import OPTCritic
|
||||
from .opt_rm import OPTRM
|
||||
|
||||
__all__ = ["OPTActor", "OPTCritic", "OPTRM"]
|
|
@ -1,37 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
from transformers.models.opt.configuration_opt import OPTConfig
|
||||
from transformers.models.opt.modeling_opt import OPTForCausalLM
|
||||
|
||||
from ..base import Actor
|
||||
|
||||
|
||||
class OPTActor(Actor):
|
||||
"""
|
||||
OPT Actor model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (OPTConfig): Model config.
|
||||
checkpoint (bool): Enable gradient checkpointing.
|
||||
lora_rank (int): Rank of the low-rank approximation.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: Optional[str] = None,
|
||||
config: Optional[OPTConfig] = None,
|
||||
checkpoint: bool = False,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = OPTForCausalLM.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = OPTForCausalLM(config)
|
||||
else:
|
||||
model = OPTForCausalLM(OPTConfig())
|
||||
if checkpoint:
|
||||
model.gradient_checkpointing_enable()
|
||||
super().__init__(model, lora_rank, lora_train_bias)
|
|
@ -1,37 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch.nn as nn
|
||||
from transformers.models.opt.configuration_opt import OPTConfig
|
||||
from transformers.models.opt.modeling_opt import OPTModel
|
||||
|
||||
from ..base import Critic
|
||||
|
||||
|
||||
class OPTCritic(Critic):
|
||||
"""
|
||||
OPT Critic model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (OPTConfig): Model config.
|
||||
lora_rank (int): Rank of the low-rank approximation.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: Optional[str] = None,
|
||||
config: Optional[OPTConfig] = None,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
**kwargs,
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = OPTModel.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = OPTModel(config)
|
||||
else:
|
||||
model = OPTModel(OPTConfig())
|
||||
|
||||
value_head = nn.Linear(model.config.word_embed_proj_dim, 1)
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
|
|
@ -1,36 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch.nn as nn
|
||||
from transformers import OPTConfig, OPTModel
|
||||
|
||||
from ..base import RewardModel
|
||||
|
||||
|
||||
class OPTRM(RewardModel):
|
||||
"""
|
||||
OPT Reward model.
|
||||
|
||||
Args:
|
||||
pretrained (str): Pretrained model name or path.
|
||||
config (OPTConfig): Model config.
|
||||
lora_rank (int): Rank of the low-rank approximation.
|
||||
lora_train_bias (str): LoRA bias training mode.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pretrained: Optional[str] = None,
|
||||
config: Optional[OPTConfig] = None,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = "none",
|
||||
) -> None:
|
||||
if pretrained is not None:
|
||||
model = OPTModel.from_pretrained(pretrained)
|
||||
elif config is not None:
|
||||
model = OPTModel(config)
|
||||
else:
|
||||
model = OPTModel(OPTConfig())
|
||||
|
||||
value_head = nn.Linear(model.config.word_embed_proj_dim, 1)
|
||||
value_head.weight.data.normal_(mean=0.0, std=1 / (model.config.word_embed_proj_dim + 1))
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias)
|
|
@ -1,69 +0,0 @@
|
|||
from typing import Optional, Union
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
def _compute_approx_kl(
|
||||
log_probs: torch.Tensor, log_probs_base: torch.Tensor, action_mask: Optional[torch.Tensor] = None
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Compute the approximate KL divergence between two distributions.
|
||||
Schulman blog: http://joschu.net/blog/kl-approx.html
|
||||
|
||||
Args:
|
||||
log_probs: Log probabilities of the new distribution.
|
||||
log_probs_base: Log probabilities of the base distribution.
|
||||
action_mask: Mask for actions.
|
||||
"""
|
||||
|
||||
log_ratio = log_probs_base - log_probs
|
||||
approx_kl = (log_ratio.exp() - 1) - log_ratio
|
||||
if action_mask is not None:
|
||||
approx_kl = masked_mean(approx_kl, action_mask, dim=1)
|
||||
return approx_kl
|
||||
approx_kl = approx_kl.mean(dim=1)
|
||||
return approx_kl
|
||||
|
||||
|
||||
def compute_reward(
|
||||
r: Union[torch.Tensor, float],
|
||||
kl_coef: float,
|
||||
log_probs: torch.Tensor,
|
||||
log_probs_base: torch.Tensor,
|
||||
action_mask: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
if kl_coef <= 0.0:
|
||||
return r
|
||||
kl = _compute_approx_kl(log_probs, log_probs_base, action_mask=action_mask)
|
||||
reward = r - kl_coef * kl
|
||||
return reward
|
||||
|
||||
|
||||
def _log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
|
||||
log_probs = F.log_softmax(logits, dim=-1)
|
||||
log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1))
|
||||
return log_probs_labels.squeeze(-1)
|
||||
|
||||
|
||||
def calc_action_log_probs(logits: torch.Tensor, sequences: torch.LongTensor, num_actions: int) -> torch.Tensor:
|
||||
"""Calculate action log probs.
|
||||
|
||||
Args:
|
||||
output (torch.Tensor): Output tensor of Actor.forward.logits.
|
||||
sequences (torch.LongTensor): Input sequences.
|
||||
num_actions (int): Number of actions.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: Action log probs.
|
||||
"""
|
||||
log_probs = _log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:])
|
||||
return log_probs[:, -num_actions:]
|
||||
|
||||
|
||||
def masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor:
|
||||
tensor = tensor * mask
|
||||
tensor = tensor.sum(dim=dim)
|
||||
mask_sum = mask.sum(dim=dim)
|
||||
mean = tensor / (mask_sum + 1e-8)
|
||||
return mean
|
|
@ -1,6 +0,0 @@
|
|||
from .base import OnPolicyTrainer, SLTrainer
|
||||
from .ppo import PPOTrainer
|
||||
from .rm import RewardModelTrainer
|
||||
from .sft import SFTTrainer
|
||||
|
||||
__all__ = ["SLTrainer", "OnPolicyTrainer", "RewardModelTrainer", "SFTTrainer", "PPOTrainer"]
|
|
@ -1,5 +0,0 @@
|
|||
from .base import Callback
|
||||
from .performance_evaluator import PerformanceEvaluator
|
||||
from .save_checkpoint import SaveCheckpoint
|
||||
|
||||
__all__ = ["Callback", "PerformanceEvaluator", "SaveCheckpoint"]
|
|
@ -1,76 +0,0 @@
|
|||
import os
|
||||
|
||||
import torch.distributed as dist
|
||||
from coati.trainer.strategies import GeminiStrategy, LowLevelZeroStrategy, Strategy
|
||||
from coati.trainer.utils import is_rank_0
|
||||
from torch import nn
|
||||
from torch.optim import Optimizer
|
||||
|
||||
from .base import Callback
|
||||
|
||||
|
||||
class SaveCheckpoint(Callback):
|
||||
"""
|
||||
The callback for saving checkpoint for coati.
|
||||
|
||||
Only support saving actor and critic model.
|
||||
A typical architecture of the saved checkpoint would be:
|
||||
- checkpoint
|
||||
- episode_x
|
||||
- actor.pt
|
||||
- actor-optim-rank-0.pt
|
||||
- actor-optim-rank-1.pt
|
||||
- critic.pt
|
||||
- critic-optim-rank-0.pt
|
||||
- critic-optim-rank-1.pt
|
||||
- ...
|
||||
|
||||
Args:
|
||||
path(str): the base path you want to save checkpoint, the checkpoint would be saved at `path/checkpoint`
|
||||
interval(int): the interval episode of saving checkpoint
|
||||
strategy(Strategy): the strategy used to train
|
||||
actor(nn.Module): the actor model
|
||||
critic(nn.Module): the critic model
|
||||
actor_optim(Optimizer): the optimizer of actor
|
||||
critic_optim(Optimizer): the optimizer of critic
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
path: str,
|
||||
interval: int,
|
||||
strategy: Strategy,
|
||||
actor: nn.Module = None,
|
||||
critic: nn.Module = None,
|
||||
actor_optim: Optimizer = None,
|
||||
critic_optim: Optimizer = None,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self.path = os.path.join(path, "checkpoint")
|
||||
self.interval = interval
|
||||
self.strategy = strategy
|
||||
self.model_dict = {"actor": [actor, actor_optim], "critic": [critic, critic_optim]}
|
||||
|
||||
def on_episode_end(self, episode: int) -> None:
|
||||
if (episode + 1) % self.interval != 0:
|
||||
return
|
||||
base_path = os.path.join(self.path, f"episode_{episode}")
|
||||
if not os.path.exists(base_path):
|
||||
os.makedirs(base_path)
|
||||
|
||||
for model in self.model_dict.keys():
|
||||
# save model
|
||||
if self.model_dict[model][0] is None:
|
||||
# saving only optimizer states is meaningless, so it would be skipped
|
||||
continue
|
||||
model_path = os.path.join(base_path, f"{model}.pt")
|
||||
self.strategy.save_model(model=self.model_dict[model][0], path=model_path, only_rank0=True)
|
||||
|
||||
# save optimizer
|
||||
if self.model_dict[model][1] is None:
|
||||
continue
|
||||
only_rank0 = not isinstance(self.strategy, (LowLevelZeroStrategy, GeminiStrategy))
|
||||
rank = 0 if is_rank_0() else dist.get_rank()
|
||||
optim_path = os.path.join(base_path, f"{model}-optim-rank-{rank}.pt")
|
||||
self.strategy.save_optimizer(optimizer=self.model_dict[model][1], path=optim_path, only_rank0=only_rank0)
|
|
@ -1,202 +0,0 @@
|
|||
from typing import Dict, List, Optional
|
||||
|
||||
from coati.experience_buffer import NaiveExperienceBuffer
|
||||
from coati.experience_maker import Experience, NaiveExperienceMaker
|
||||
from coati.models.base import Actor, Critic, RewardModel, get_base_model
|
||||
from coati.models.loss import GPTLMLoss, PolicyLoss, ValueLoss
|
||||
from coati.models.utils import calc_action_log_probs
|
||||
from torch.optim import Optimizer
|
||||
from torch.utils.data import DataLoader, DistributedSampler
|
||||
from tqdm import tqdm
|
||||
from transformers import PreTrainedTokenizerBase
|
||||
|
||||
from colossalai.accelerator import get_accelerator
|
||||
|
||||
from .base import OnPolicyTrainer
|
||||
from .callbacks import Callback
|
||||
from .strategies import GeminiStrategy, Strategy
|
||||
from .utils import CycledDataLoader, is_rank_0, to_device
|
||||
|
||||
|
||||
def _set_default_generate_kwargs(strategy: Strategy, generate_kwargs: dict, actor: Actor) -> Dict:
|
||||
unwrapped_model = strategy.unwrap_model(actor)
|
||||
hf_model = get_base_model(unwrapped_model)
|
||||
new_kwargs = {**generate_kwargs}
|
||||
# use huggingface models method directly
|
||||
if "prepare_inputs_fn" not in generate_kwargs and hasattr(hf_model, "prepare_inputs_for_generation"):
|
||||
new_kwargs["prepare_inputs_fn"] = hf_model.prepare_inputs_for_generation
|
||||
|
||||
if "update_model_kwargs_fn" not in generate_kwargs and hasattr(hf_model, "_update_model_kwargs_for_generation"):
|
||||
new_kwargs["update_model_kwargs_fn"] = hf_model._update_model_kwargs_for_generation
|
||||
|
||||
return new_kwargs
|
||||
|
||||
|
||||
class PPOTrainer(OnPolicyTrainer):
|
||||
"""
|
||||
Trainer for PPO algorithm.
|
||||
|
||||
Args:
|
||||
strategy (Strategy): the strategy to use for training
|
||||
actor (Actor): the actor model in ppo algorithm
|
||||
critic (Critic): the critic model in ppo algorithm
|
||||
reward_model (RewardModel): the reward model in rlhf algorithm to make reward of sentences
|
||||
initial_model (Actor): the initial model in rlhf algorithm to generate reference logics to limit the update of actor
|
||||
actor_optim (Optimizer): the optimizer to use for actor model
|
||||
critic_optim (Optimizer): the optimizer to use for critic model
|
||||
kl_coef (float, defaults to 0.1): the coefficient of kl divergence loss
|
||||
train_batch_size (int, defaults to 8): the batch size to use for training
|
||||
buffer_limit (int, defaults to 0): the max_size limitation of buffer
|
||||
buffer_cpu_offload (bool, defaults to True): whether to offload buffer to cpu
|
||||
eps_clip (float, defaults to 0.2): the clip coefficient of policy loss
|
||||
vf_coef (float, defaults to 1.0): the coefficient of value loss
|
||||
ptx_coef (float, defaults to 0.9): the coefficient of ptx loss
|
||||
value_clip (float, defaults to 0.4): the clip coefficient of value loss
|
||||
sample_buffer (bool, defaults to False): whether to sample from buffer
|
||||
dataloader_pin_memory (bool, defaults to True): whether to pin memory for data loader
|
||||
offload_inference_models (bool, defaults to True): whether to offload inference models to cpu during training process
|
||||
callbacks (List[Callback], defaults to []): the callbacks to call during training process
|
||||
generate_kwargs (dict, optional): the kwargs to use while model generating
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
strategy: Strategy,
|
||||
actor: Actor,
|
||||
critic: Critic,
|
||||
reward_model: RewardModel,
|
||||
initial_model: Actor,
|
||||
actor_optim: Optimizer,
|
||||
critic_optim: Optimizer,
|
||||
tokenizer: PreTrainedTokenizerBase,
|
||||
kl_coef: float = 0.1,
|
||||
ptx_coef: float = 0.9,
|
||||
train_batch_size: int = 8,
|
||||
buffer_limit: int = 0,
|
||||
buffer_cpu_offload: bool = True,
|
||||
eps_clip: float = 0.2,
|
||||
vf_coef: float = 1.0,
|
||||
value_clip: float = 0.4,
|
||||
sample_buffer: bool = False,
|
||||
dataloader_pin_memory: bool = True,
|
||||
offload_inference_models: bool = True,
|
||||
callbacks: List[Callback] = [],
|
||||
**generate_kwargs,
|
||||
) -> None:
|
||||
if isinstance(strategy, GeminiStrategy):
|
||||
assert not offload_inference_models, "GeminiPlugin is not compatible with manual model.to('cpu')"
|
||||
|
||||
data_buffer = NaiveExperienceBuffer(train_batch_size, buffer_limit, buffer_cpu_offload)
|
||||
super().__init__(strategy, data_buffer, sample_buffer, dataloader_pin_memory, callbacks)
|
||||
|
||||
self.generate_kwargs = _set_default_generate_kwargs(strategy, generate_kwargs, actor)
|
||||
self.experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, tokenizer, kl_coef)
|
||||
|
||||
self.actor = actor
|
||||
self.critic = critic
|
||||
self.tokenizer = tokenizer
|
||||
|
||||
self.actor_loss_fn = PolicyLoss(eps_clip)
|
||||
self.critic_loss_fn = ValueLoss(value_clip)
|
||||
self.vf_coef = vf_coef
|
||||
self.ptx_loss_fn = GPTLMLoss()
|
||||
self.ptx_coef = ptx_coef
|
||||
self.actor_optim = actor_optim
|
||||
self.critic_optim = critic_optim
|
||||
|
||||
self.offload_inference_models = offload_inference_models
|
||||
self.device = get_accelerator().get_current_device()
|
||||
|
||||
def _before_fit(
|
||||
self,
|
||||
prompt_dataloader: DataLoader,
|
||||
pretrain_dataloader: DataLoader,
|
||||
log_dir: Optional[str] = None,
|
||||
use_wandb: bool = False,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
prompt_dataloader (DataLoader): the dataloader to use for prompt data
|
||||
pretrain_dataloader (DataLoader): the dataloader to use for pretrain data
|
||||
"""
|
||||
self.prompt_dataloader = CycledDataLoader(prompt_dataloader)
|
||||
self.pretrain_dataloader = CycledDataLoader(pretrain_dataloader)
|
||||
|
||||
self.writer = None
|
||||
if use_wandb and is_rank_0():
|
||||
assert log_dir is not None, "log_dir must be provided when use_wandb is True"
|
||||
import wandb
|
||||
|
||||
wandb.init(project="Coati-ppo", sync_tensorboard=True)
|
||||
if log_dir is not None and is_rank_0():
|
||||
import os
|
||||
import time
|
||||
|
||||
from torch.utils.tensorboard import SummaryWriter
|
||||
|
||||
log_dir = os.path.join(log_dir, "ppo")
|
||||
log_dir = os.path.join(log_dir, time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()))
|
||||
self.writer = SummaryWriter(log_dir=log_dir)
|
||||
|
||||
def _make_experience(self, collect_step: int) -> Experience:
|
||||
prompts = self.prompt_dataloader.next()
|
||||
if self.offload_inference_models:
|
||||
# TODO(ver217): this may be controlled by strategy if they are prepared by strategy
|
||||
self.experience_maker.initial_model.to(self.device)
|
||||
self.experience_maker.reward_model.to(self.device)
|
||||
assert isinstance(prompts, dict), f'Unsupported input type "{type(prompts)}"'
|
||||
return self.experience_maker.make_experience(**prompts, **self.generate_kwargs)
|
||||
|
||||
def _training_step(self, experience: Experience):
|
||||
self.actor.train()
|
||||
self.critic.train()
|
||||
# policy loss
|
||||
num_actions = experience.action_log_probs.size(1)
|
||||
actor_logits = self.actor(experience.sequences, experience.attention_mask)["logits"]
|
||||
action_log_probs = calc_action_log_probs(actor_logits, experience.sequences, num_actions)
|
||||
actor_loss = self.actor_loss_fn(
|
||||
action_log_probs, experience.action_log_probs, experience.advantages, action_mask=experience.action_mask
|
||||
)
|
||||
actor_loss = (1 - self.ptx_coef) * actor_loss
|
||||
self.strategy.backward(actor_loss, self.actor, self.actor_optim)
|
||||
|
||||
# ptx loss
|
||||
if self.ptx_coef != 0:
|
||||
batch = self.pretrain_dataloader.next()
|
||||
batch = to_device(batch, self.device)
|
||||
ptx_log_probs = self.actor(batch["input_ids"], batch["attention_mask"])["logits"]
|
||||
ptx_loss = self.ptx_coef * self.ptx_loss_fn(ptx_log_probs, batch["labels"])
|
||||
self.strategy.backward(ptx_loss, self.actor, self.actor_optim)
|
||||
|
||||
self.strategy.optimizer_step(self.actor_optim)
|
||||
self.actor_optim.zero_grad()
|
||||
|
||||
# value loss
|
||||
values = self.critic(experience.sequences, attention_mask=experience.attention_mask)
|
||||
critic_loss = self.critic_loss_fn(values, experience.values, experience.reward)
|
||||
critic_loss = critic_loss * self.vf_coef
|
||||
self.strategy.backward(critic_loss, self.critic, self.critic_optim)
|
||||
self.strategy.optimizer_step(self.critic_optim)
|
||||
self.critic_optim.zero_grad()
|
||||
|
||||
def _learn(self, update_step: int):
|
||||
if self.offload_inference_models:
|
||||
self.experience_maker.initial_model.to("cpu")
|
||||
self.experience_maker.reward_model.to("cpu")
|
||||
|
||||
# buffer may be empty at first, we should rebuild at each training
|
||||
if self.sample_buffer:
|
||||
experience = self.data_buffer.sample()
|
||||
self._on_learn_batch_start()
|
||||
experience.to_device(self.device)
|
||||
self._training_step(experience)
|
||||
self._on_learn_batch_end(experience)
|
||||
else:
|
||||
if isinstance(self.dataloader.sampler, DistributedSampler):
|
||||
self.dataloader.sampler.set_epoch(update_step)
|
||||
pbar = tqdm(self.dataloader, desc=f"Train epoch [{update_step + 1}]", disable=not is_rank_0())
|
||||
for experience in pbar:
|
||||
self._on_learn_batch_start()
|
||||
experience.to_device(self.device)
|
||||
self._training_step(experience)
|
||||
self._on_learn_batch_end(experience)
|
|
@ -1,123 +0,0 @@
|
|||
from typing import Callable, Optional
|
||||
|
||||
import torch
|
||||
import tqdm
|
||||
from torch.optim import Optimizer
|
||||
from torch.optim.lr_scheduler import _LRScheduler
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
from .base import SLTrainer
|
||||
from .strategies import Strategy
|
||||
from .utils import is_rank_0
|
||||
|
||||
|
||||
class RewardModelTrainer(SLTrainer):
|
||||
"""
|
||||
Trainer to use while training reward model.
|
||||
|
||||
Args:
|
||||
model (torch.nn.Module): the model to train
|
||||
strategy (Strategy): the strategy to use for training
|
||||
optim (Optimizer): the optimizer to use for training
|
||||
lr_scheduler (_LRScheduler): the lr scheduler to use for training
|
||||
loss_fn (callable): the loss function to use for training
|
||||
max_epochs (int, defaults to 2): the number of epochs to train
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model,
|
||||
strategy: Strategy,
|
||||
optim: Optimizer,
|
||||
lr_scheduler: _LRScheduler,
|
||||
loss_fn: Callable,
|
||||
max_epochs: int = 1,
|
||||
) -> None:
|
||||
super().__init__(strategy, max_epochs, model, optim)
|
||||
|
||||
self.loss_fn = loss_fn
|
||||
self.scheduler = lr_scheduler
|
||||
|
||||
self.num_train_step = 0
|
||||
|
||||
def _eval(self, epoch):
|
||||
if self.eval_dataloader is not None:
|
||||
self.model.eval()
|
||||
dist, num_correct, num_samples = 0, 0, 0
|
||||
with torch.no_grad():
|
||||
for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader:
|
||||
chosen_ids = chosen_ids.squeeze(1).to(torch.cuda.current_device())
|
||||
c_mask = c_mask.squeeze(1).to(torch.cuda.current_device())
|
||||
reject_ids = reject_ids.squeeze(1).to(torch.cuda.current_device())
|
||||
r_mask = r_mask.squeeze(1).to(torch.cuda.current_device())
|
||||
chosen_reward = self.model(chosen_ids, attention_mask=c_mask)
|
||||
reject_reward = self.model(reject_ids, attention_mask=r_mask)
|
||||
num_samples += chosen_ids.size(0)
|
||||
num_correct += (chosen_reward > reject_reward).sum().item()
|
||||
dist += (chosen_reward - reject_reward).mean().item()
|
||||
self.dist = dist / len(self.eval_dataloader)
|
||||
self.acc = num_correct / num_samples
|
||||
|
||||
if self.writer:
|
||||
self.writer.add_scalar("eval/dist", self.dist, epoch)
|
||||
self.writer.add_scalar("eval/acc", self.acc, epoch)
|
||||
|
||||
def _train(self, epoch):
|
||||
self.model.train()
|
||||
step_bar = tqdm.trange(
|
||||
len(self.train_dataloader), desc=f"Epoch {epoch + 1}/{self.max_epochs}", disable=not is_rank_0()
|
||||
)
|
||||
for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:
|
||||
chosen_ids = chosen_ids.squeeze(1).to(torch.cuda.current_device())
|
||||
c_mask = c_mask.squeeze(1).to(torch.cuda.current_device())
|
||||
reject_ids = reject_ids.squeeze(1).to(torch.cuda.current_device())
|
||||
r_mask = r_mask.squeeze(1).to(torch.cuda.current_device())
|
||||
chosen_reward = self.model(chosen_ids, attention_mask=c_mask)
|
||||
reject_reward = self.model(reject_ids, attention_mask=r_mask)
|
||||
loss = self.loss_fn(chosen_reward, reject_reward)
|
||||
self.strategy.backward(loss, self.model, self.optimizer)
|
||||
self.strategy.optimizer_step(self.optimizer)
|
||||
self.optimizer.zero_grad()
|
||||
if self.writer:
|
||||
self.writer.add_scalar("train/loss", loss.item(), self.num_train_step)
|
||||
self.writer.add_scalar("train/lr", self.optimizer.param_groups[0]["lr"], self.num_train_step)
|
||||
self.writer.add_scalar("train/dist", (chosen_reward - reject_reward).mean().item(), self.num_train_step)
|
||||
self.writer.add_scalar(
|
||||
"train/acc", (chosen_reward > reject_reward).float().mean().item(), self.num_train_step
|
||||
)
|
||||
self.num_train_step += 1
|
||||
if self.num_train_step % 100 == 0:
|
||||
self.scheduler.step()
|
||||
step_bar.update()
|
||||
step_bar.close()
|
||||
|
||||
def _before_fit(
|
||||
self,
|
||||
train_dataloader: DataLoader,
|
||||
eval_dataloader: DataLoader,
|
||||
log_dir: Optional[str] = None,
|
||||
use_wandb: bool = False,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
train_dataloader (DataLoader): the dataloader to use for training
|
||||
eval_dataloader (DataLoader): the dataloader to use for evaluation
|
||||
"""
|
||||
self.train_dataloader = train_dataloader
|
||||
self.eval_dataloader = eval_dataloader
|
||||
|
||||
self.writer = None
|
||||
if use_wandb and is_rank_0():
|
||||
assert log_dir is not None, "log_dir must be provided when use_wandb is True"
|
||||
import wandb
|
||||
|
||||
wandb.init(project="Coati-rm", sync_tensorboard=True)
|
||||
if log_dir is not None and is_rank_0():
|
||||
import os
|
||||
import time
|
||||
|
||||
from torch.utils.tensorboard import SummaryWriter
|
||||
|
||||
log_dir = os.path.join(log_dir, "rm")
|
||||
log_dir = os.path.join(log_dir, time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()))
|
||||
self.writer = SummaryWriter(log_dir=log_dir)
|
|
@ -1,130 +0,0 @@
|
|||
from typing import Optional
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import tqdm
|
||||
from torch.optim import Optimizer
|
||||
from torch.optim.lr_scheduler import _LRScheduler
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
from colossalai.logging import DistributedLogger
|
||||
|
||||
from .base import SLTrainer
|
||||
from .strategies import GeminiStrategy, Strategy
|
||||
from .utils import is_rank_0, to_device
|
||||
|
||||
|
||||
class SFTTrainer(SLTrainer):
|
||||
"""
|
||||
Trainer to use while training reward model.
|
||||
|
||||
Args:
|
||||
model (torch.nn.Module): the model to train
|
||||
strategy (Strategy): the strategy to use for training
|
||||
optim(Optimizer): the optimizer to use for training
|
||||
lr_scheduler(_LRScheduler): the lr scheduler to use for training
|
||||
max_epochs (int, defaults to 2): the number of epochs to train
|
||||
accumulation_steps (int, defaults to 8): the number of steps to accumulate gradients
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model,
|
||||
strategy: Strategy,
|
||||
optim: Optimizer,
|
||||
lr_scheduler: _LRScheduler,
|
||||
max_epochs: int = 2,
|
||||
accumulation_steps: int = 8,
|
||||
) -> None:
|
||||
if accumulation_steps > 1:
|
||||
assert not isinstance(
|
||||
strategy, GeminiStrategy
|
||||
), "Accumulation steps are not supported in stage 3 of ColossalAI"
|
||||
|
||||
super().__init__(strategy, max_epochs, model, optim)
|
||||
|
||||
self.accumulation_steps = accumulation_steps
|
||||
self.scheduler = lr_scheduler
|
||||
|
||||
self.num_train_step = 0
|
||||
self.num_eval_step = 0
|
||||
|
||||
def _train(self, epoch: int):
|
||||
self.model.train()
|
||||
step_bar = tqdm.trange(
|
||||
len(self.train_dataloader) // self.accumulation_steps,
|
||||
desc=f"Epoch {epoch + 1}/{self.max_epochs}",
|
||||
disable=not is_rank_0(),
|
||||
)
|
||||
for i, batch in enumerate(self.train_dataloader):
|
||||
batch = to_device(batch, torch.cuda.current_device())
|
||||
outputs = self.model(batch["input_ids"], attention_mask=batch["attention_mask"], labels=batch["labels"])
|
||||
loss = outputs.loss / self.accumulation_steps
|
||||
self.total_loss += loss.item()
|
||||
self.strategy.backward(loss, self.model, self.optimizer)
|
||||
# gradient accumulation
|
||||
if (i + 1) % self.accumulation_steps == 0:
|
||||
self.strategy.optimizer_step(self.optimizer)
|
||||
self.optimizer.zero_grad()
|
||||
self.scheduler.step()
|
||||
if self.writer:
|
||||
self.writer.add_scalar("train/loss", self.total_loss, self.num_train_step)
|
||||
self.writer.add_scalar("train/lr", self.scheduler.get_last_lr()[0], self.num_train_step)
|
||||
self.num_train_step += 1
|
||||
self.total_loss = 0
|
||||
step_bar.update()
|
||||
step_bar.close()
|
||||
|
||||
def _eval(self, epoch: int):
|
||||
if self.eval_dataloader is not None:
|
||||
self.model.eval()
|
||||
with torch.no_grad():
|
||||
loss_sum, num_seen = 0, 0
|
||||
for batch in self.eval_dataloader:
|
||||
batch = to_device(batch, torch.cuda.current_device())
|
||||
outputs = self.model(
|
||||
batch["input_ids"], attention_mask=batch["attention_mask"], labels=batch["labels"]
|
||||
)
|
||||
loss_sum += outputs.loss.item()
|
||||
num_seen += batch["input_ids"].size(0)
|
||||
loss_mean = loss_sum / num_seen
|
||||
if dist.get_rank() == 0:
|
||||
self.logger.info(f"Eval Epoch {epoch}/{self.max_epochs} loss {loss_mean}")
|
||||
if self.writer:
|
||||
self.writer.add_scalar("eval/loss", loss_mean, self.num_eval_step)
|
||||
self.num_eval_step += 1
|
||||
|
||||
def _before_fit(
|
||||
self,
|
||||
train_dataloader: DataLoader,
|
||||
eval_dataloader: Optional[DataLoader] = None,
|
||||
logger: Optional[DistributedLogger] = None,
|
||||
log_dir: Optional[str] = None,
|
||||
use_wandb: bool = False,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
train_dataloader: the dataloader to use for training
|
||||
eval_dataloader: the dataloader to use for evaluation
|
||||
"""
|
||||
self.train_dataloader = train_dataloader
|
||||
self.eval_dataloader = eval_dataloader
|
||||
|
||||
self.logger = logger
|
||||
self.writer = None
|
||||
if use_wandb and is_rank_0():
|
||||
assert log_dir is not None, "log_dir must be provided when use_wandb is True"
|
||||
import wandb
|
||||
|
||||
wandb.init(project="Coati-sft", sync_tensorboard=True)
|
||||
if log_dir is not None and is_rank_0():
|
||||
import os
|
||||
import time
|
||||
|
||||
from torch.utils.tensorboard import SummaryWriter
|
||||
|
||||
log_dir = os.path.join(log_dir, "sft")
|
||||
log_dir = os.path.join(log_dir, time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()))
|
||||
self.writer = SummaryWriter(log_dir=log_dir)
|
||||
|
||||
self.total_loss = 0
|
|
@ -1,5 +0,0 @@
|
|||
from .base import Strategy
|
||||
from .colossalai import GeminiStrategy, LowLevelZeroStrategy
|
||||
from .ddp import DDPStrategy
|
||||
|
||||
__all__ = ["Strategy", "DDPStrategy", "LowLevelZeroStrategy", "GeminiStrategy"]
|
|
@ -1,137 +0,0 @@
|
|||
from abc import ABC, abstractmethod
|
||||
from contextlib import nullcontext
|
||||
from typing import Callable, Dict, List, Optional, Tuple, Union
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from coati.experience_buffer import ExperienceBuffer
|
||||
from torch.optim import Optimizer
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
|
||||
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import Plugin
|
||||
|
||||
from .sampler import DistributedSampler
|
||||
|
||||
_BoostArgSpec = Union[nn.Module, Tuple[nn.Module, Optimizer], Dict]
|
||||
|
||||
|
||||
class Strategy(ABC):
|
||||
"""
|
||||
Base class for training strategies.
|
||||
"""
|
||||
|
||||
def __init__(self, plugin_initializer: Callable[..., Optional[Plugin]] = lambda: None) -> None:
|
||||
super().__init__()
|
||||
# NOTE: dist must be initialized before Booster
|
||||
self.setup_distributed()
|
||||
self.plugin = plugin_initializer()
|
||||
self.booster = Booster(plugin=self.plugin)
|
||||
self._post_init()
|
||||
|
||||
@abstractmethod
|
||||
def _post_init(self) -> None:
|
||||
pass
|
||||
|
||||
def backward(self, loss: torch.Tensor, model: nn.Module, optimizer: Optimizer, **kwargs) -> None:
|
||||
self.booster.backward(loss, optimizer)
|
||||
|
||||
def optimizer_step(self, optimizer: Optimizer, **kwargs) -> None:
|
||||
optimizer.step()
|
||||
|
||||
@abstractmethod
|
||||
def setup_distributed(self) -> None:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def setup_dataloader(self, data_buffer: ExperienceBuffer, pin_memory: bool = False) -> DataLoader:
|
||||
pass
|
||||
|
||||
def model_init_context(self):
|
||||
return nullcontext()
|
||||
|
||||
def prepare(self, *boost_args: _BoostArgSpec) -> Union[List[_BoostArgSpec], _BoostArgSpec]:
|
||||
"""Prepare [model | (model, optimizer) | Dict] based on each strategy.
|
||||
NOTE: the keys of Dict must be a subset of `self.booster.boost`'s arguments.
|
||||
|
||||
Example::
|
||||
>>> # e.g., include lr_scheduler
|
||||
>>> result_dict = strategy.prepare(dict(model=model, lr_scheduler=lr_scheduler))
|
||||
>>> # when fine-tuning actor and critic
|
||||
>>> (actor, actor_optim), (critic, critic_optim), reward_model, initial_model = strategy.prepare((actor, actor_optim), (critic, critic_optim), reward_model, initial_model)
|
||||
>>> # or when training reward model
|
||||
>>> (reward_model, reward_model_optim) = strategy.prepare((reward_model, reward_model_optim))
|
||||
>>> # or just inference
|
||||
>>> actor, critic = strategy.prepare(actor, critic)
|
||||
|
||||
Returns:
|
||||
Union[List[_BoostArgSpec], _BoostArgSpec]: [model | (model, optimizer) | Dict] in the original order.
|
||||
"""
|
||||
|
||||
rets = []
|
||||
for arg in boost_args:
|
||||
if isinstance(arg, nn.Module):
|
||||
model, *_ = self.booster.boost(arg)
|
||||
rets.append(model)
|
||||
elif isinstance(arg, tuple):
|
||||
try:
|
||||
model, optimizer = arg
|
||||
except ValueError:
|
||||
raise RuntimeError(f'Expect (model, optimizer) pair, got a tuple with size "{len(arg)}"')
|
||||
model, optimizer, *_ = self.booster.boost(model=model, optimizer=optimizer)
|
||||
rets.append((model, optimizer))
|
||||
elif isinstance(arg, Dict):
|
||||
model, optimizer, criterion, dataloader, lr_scheduler = self.booster.boost(**arg)
|
||||
boost_result = dict(
|
||||
model=model,
|
||||
optimizer=optimizer,
|
||||
criterion=criterion,
|
||||
dataloader=dataloader,
|
||||
lr_scheduler=lr_scheduler,
|
||||
)
|
||||
# remove None values
|
||||
boost_result = {key: value for key, value in boost_result.items() if value is not None}
|
||||
rets.append(boost_result)
|
||||
else:
|
||||
raise RuntimeError(f"Type {type(arg)} is not supported")
|
||||
|
||||
return rets[0] if len(rets) == 1 else rets
|
||||
|
||||
@staticmethod
|
||||
def unwrap_model(model: nn.Module) -> nn.Module:
|
||||
"""Get the unwrapped model from a wrapped model made by Strategy.prepare.
|
||||
|
||||
Args:
|
||||
model (nn.Module): the model to unwrap
|
||||
|
||||
Returns:
|
||||
nn.Module: the original model
|
||||
"""
|
||||
return model
|
||||
|
||||
def save_model(self, model: nn.Module, path: str, shard: bool = False, **kwargs) -> None:
|
||||
self.booster.save_model(model, path, shard=shard, **kwargs)
|
||||
|
||||
def load_model(self, model: nn.Module, path: str, strict: bool = True) -> None:
|
||||
self.booster.load_model(model, path, strict)
|
||||
|
||||
def save_optimizer(self, optimizer: Optimizer, path: str, only_rank0: bool = False, **kwargs) -> None:
|
||||
self.booster.save_optimizer(optimizer, path, shard=not only_rank0, **kwargs)
|
||||
|
||||
def load_optimizer(self, optimizer: Optimizer, path: str) -> None:
|
||||
self.booster.load_optimizer(optimizer, path)
|
||||
|
||||
def setup_sampler(self, dataset) -> DistributedSampler:
|
||||
# FIXME(cwher): this is only invoked in train_on_ray, not tested after adapt Boost API.
|
||||
return DistributedSampler(dataset, 1, 0)
|
||||
|
||||
@abstractmethod
|
||||
def save_pretrained(
|
||||
self, model: nn.Module, path: str, only_rank0: bool = True, tokenizer: Optional[PreTrainedTokenizerBase] = None
|
||||
) -> None:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_model_state_dict_shard(self, model: nn.Module, **config):
|
||||
pass
|
|
@ -1,209 +0,0 @@
|
|||
import warnings
|
||||
from typing import Optional
|
||||
|
||||
import torch.nn as nn
|
||||
|
||||
import colossalai
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin
|
||||
from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel
|
||||
from colossalai.zero.gemini.gemini_ddp import GeminiDDP
|
||||
|
||||
from .ddp import DDPStrategy
|
||||
|
||||
|
||||
class LowLevelZeroStrategy(DDPStrategy):
|
||||
"""
|
||||
The strategy for training with ColossalAI.
|
||||
|
||||
Args:
|
||||
stage(int): The stage to use in ZeRO. Choose in (1, 2)
|
||||
precision(str): The precision to use. Choose in ('fp32', 'fp16').
|
||||
seed(int): The seed for the random number generator.
|
||||
placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')
|
||||
If it is “cpu”, parameters, gradients and optimizer states will be offloaded to CPU,
|
||||
If it is “cuda”, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.
|
||||
reduce_bucket_size(int): The reduce bucket size in bytes. Only for ZeRO-1 and ZeRO-2.
|
||||
overlap_communication(bool): Whether to overlap communication and computation. Only for ZeRO-1 and ZeRO-2.
|
||||
initial_scale(float): The initial scale for the optimizer.
|
||||
growth_factor(float): The growth factor for the optimizer.
|
||||
backoff_factor(float): The backoff factor for the optimizer.
|
||||
growth_interval(int): The growth interval for the optimizer.
|
||||
hysteresis(int): The hysteresis for the optimizer.
|
||||
min_scale(float): The minimum scale for the optimizer.
|
||||
max_scale(float): The maximum scale for the optimizer.
|
||||
max_norm(float): The maximum norm for the optimizer.
|
||||
norm_type(float): The norm type for the optimizer.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
stage: int = 2,
|
||||
precision: str = "fp16",
|
||||
seed: int = 42,
|
||||
placement_policy: str = "cuda",
|
||||
reduce_bucket_size: int = 12 * 1024**2, # only for stage 1&2
|
||||
overlap_communication: bool = True, # only for stage 1&2
|
||||
initial_scale: float = 2**16,
|
||||
growth_factor: float = 2,
|
||||
backoff_factor: float = 0.5,
|
||||
growth_interval: int = 1000,
|
||||
hysteresis: int = 2,
|
||||
min_scale: float = 1,
|
||||
max_scale: float = 2**32,
|
||||
max_norm: float = 0.0,
|
||||
norm_type: float = 2.0,
|
||||
) -> None:
|
||||
assert stage in (1, 2), f'Unsupported stage "{stage}"'
|
||||
assert placement_policy in ("cpu", "cuda"), f'Unsupported placement policy "{placement_policy}"'
|
||||
assert precision in ("fp32", "fp16"), f'Unsupported precision "{precision}"'
|
||||
|
||||
plugin_initializer = lambda: LowLevelZeroPlugin(
|
||||
stage=stage,
|
||||
precision=precision,
|
||||
reduce_bucket_size_in_m=reduce_bucket_size,
|
||||
overlap_communication=overlap_communication,
|
||||
cpu_offload=(placement_policy == "cpu"),
|
||||
initial_scale=initial_scale,
|
||||
growth_factor=growth_factor,
|
||||
backoff_factor=backoff_factor,
|
||||
growth_interval=growth_interval,
|
||||
hysteresis=hysteresis,
|
||||
min_scale=min_scale,
|
||||
max_scale=max_scale,
|
||||
max_norm=max_norm,
|
||||
norm_type=norm_type,
|
||||
)
|
||||
|
||||
super().__init__(seed, plugin_initializer)
|
||||
|
||||
def _post_init(self) -> None:
|
||||
assert isinstance(
|
||||
self.plugin, LowLevelZeroPlugin
|
||||
), f"{type(self).__name__}'s plugin is not initialized properly."
|
||||
|
||||
def setup_distributed(self) -> None:
|
||||
colossalai.launch_from_torch({}, seed=self.seed)
|
||||
|
||||
def unwrap_model(self, model: nn.Module) -> nn.Module:
|
||||
assert isinstance(model, LowLevelZeroModel)
|
||||
return model.module
|
||||
|
||||
def get_model_state_dict_shard(self, model: nn.Module, **config):
|
||||
assert isinstance(model, LowLevelZeroModel)
|
||||
yield from model.state_dict_shard(max_shard_size=1024, only_rank_0=False)
|
||||
|
||||
|
||||
class GeminiStrategy(DDPStrategy):
|
||||
"""
|
||||
The strategy for training with ColossalAI.
|
||||
|
||||
Args:
|
||||
seed(int): The seed for the random number generator.
|
||||
shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.
|
||||
This is not compatible with `from_pretrained()`. We temporarily disable this and will support it in the future.
|
||||
placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')
|
||||
If it is “cpu”, parameters, gradients and optimizer states will be offloaded to CPU,
|
||||
If it is “cuda”, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.
|
||||
pin_memory(bool): Whether to pin the memory for the data loader. Only for ZeRO-3.
|
||||
force_outputs_fp32(bool): Whether to force the outputs to be fp32. Only for ZeRO-3.
|
||||
search_range_m(int): The number of search range for the chunk size, divided by 2^20. Only for ZeRO-3.
|
||||
hidden_dim(optional, int): The hidden dimension for the gemini. Only for ZeRO-3.
|
||||
min_chunk_size_m(float): The minimum chunk size divided by 2^20. Only for ZeRO-3.
|
||||
gpu_margin_mem_ratio(float): The margin memory ratio for the GPU. Only for ZeRO-3.
|
||||
initial_scale(float): The initial scale for the optimizer.
|
||||
growth_factor(float): The growth factor for the optimizer.
|
||||
backoff_factor(float): The backoff factor for the optimizer.
|
||||
growth_interval(int): The growth interval for the optimizer.
|
||||
hysteresis(int): The hysteresis for the optimizer.
|
||||
min_scale(float): The minimum scale for the optimizer.
|
||||
max_scale(float): The maximum scale for the optimizer.
|
||||
max_norm(float): The maximum norm for the optimizer.
|
||||
norm_type(float): The norm type for the optimizer.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
seed: int = 42,
|
||||
shard_init: bool = False, # only for stage 3
|
||||
placement_policy: str = "auto",
|
||||
shard_param_frac: float = 1.0, # only for static placement
|
||||
offload_optim_frac: float = 0.0, # only for static placement
|
||||
offload_param_frac: float = 0.0, # only for static placement
|
||||
pin_memory: bool = True, # only for stage 3
|
||||
force_outputs_fp32: bool = False, # only for stage 3
|
||||
search_range_m: int = 32, # only for stage 3
|
||||
hidden_dim: Optional[int] = None, # only for stage 3
|
||||
min_chunk_size_m: float = 32, # only for stage 3
|
||||
gpu_margin_mem_ratio: float = 0.0, # only for stage 3
|
||||
initial_scale: float = 2**16,
|
||||
growth_factor: float = 2,
|
||||
backoff_factor: float = 0.5,
|
||||
growth_interval: int = 1000,
|
||||
hysteresis: int = 2,
|
||||
min_scale: float = 1,
|
||||
max_scale: float = 2**32,
|
||||
max_norm: float = 0.0,
|
||||
norm_type: float = 2.0,
|
||||
) -> None:
|
||||
# TODO(ver217): support shard_init when using from_pretrained()
|
||||
if shard_init:
|
||||
warnings.warn(
|
||||
f"Shard init is not supported model.from_pretrained() yet. "
|
||||
"Please load weights after strategy.prepare()"
|
||||
)
|
||||
self.shard_init = shard_init
|
||||
|
||||
warnings.warn(f"Stage 3 only supports fp16. Precision is set to fp16.")
|
||||
|
||||
# colossalai has changed api for get_current_device in 0.3.4 version or newer
|
||||
try:
|
||||
from colossalai.accelerator import get_accelerator
|
||||
|
||||
chunk_init_device = get_accelerator().get_current_device()
|
||||
except:
|
||||
from colossalai.utils import get_current_device
|
||||
|
||||
chunk_init_device = get_current_device()
|
||||
|
||||
# NOTE: dist should be initialized before calling get_current_device()
|
||||
plugin_initializer = lambda: GeminiPlugin(
|
||||
chunk_init_device=chunk_init_device,
|
||||
placement_policy=placement_policy,
|
||||
shard_param_frac=shard_param_frac,
|
||||
offload_optim_frac=offload_optim_frac,
|
||||
offload_param_frac=offload_param_frac,
|
||||
precision="fp16",
|
||||
pin_memory=pin_memory,
|
||||
force_outputs_fp32=force_outputs_fp32,
|
||||
strict_ddp_mode=shard_init,
|
||||
search_range_m=search_range_m,
|
||||
hidden_dim=hidden_dim,
|
||||
min_chunk_size_m=min_chunk_size_m,
|
||||
gpu_margin_mem_ratio=gpu_margin_mem_ratio,
|
||||
initial_scale=initial_scale,
|
||||
growth_factor=growth_factor,
|
||||
backoff_factor=backoff_factor,
|
||||
growth_interval=growth_interval,
|
||||
hysteresis=hysteresis,
|
||||
min_scale=min_scale,
|
||||
max_scale=max_scale,
|
||||
max_norm=max_norm,
|
||||
norm_type=norm_type,
|
||||
)
|
||||
|
||||
super().__init__(seed, plugin_initializer)
|
||||
|
||||
def _post_init(self) -> None:
|
||||
assert isinstance(self.plugin, GeminiPlugin), f"{type(self).__name__}'s plugin is not initialized properly."
|
||||
|
||||
def setup_distributed(self) -> None:
|
||||
colossalai.launch_from_torch({}, seed=self.seed)
|
||||
|
||||
def model_init_context(self):
|
||||
return super().model_init_context()
|
||||
|
||||
def unwrap_model(self, model: nn.Module) -> nn.Module:
|
||||
assert isinstance(model, GeminiDDP)
|
||||
return model.module
|
|
@ -1,136 +0,0 @@
|
|||
import os
|
||||
import random
|
||||
from collections import OrderedDict
|
||||
from typing import Callable, Optional
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import torch.nn as nn
|
||||
from coati.experience_buffer import ExperienceBuffer
|
||||
from coati.models import Actor, Critic, RewardModel
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers.modeling_utils import PreTrainedModel
|
||||
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
|
||||
|
||||
from colossalai.booster.plugin import TorchDDPPlugin
|
||||
from colossalai.booster.plugin.torch_ddp_plugin import TorchDDPModel
|
||||
|
||||
from .base import Strategy
|
||||
from .sampler import DistributedSampler
|
||||
|
||||
|
||||
# TODO Move this to a util.py (Moving to ray.util introduces ringed import)
|
||||
def get_grad_required_state_dict(model: nn.Module):
|
||||
state_dict = OrderedDict()
|
||||
for name, parameter in model.named_parameters():
|
||||
if parameter.requires_grad:
|
||||
state_dict[name] = parameter.detach()
|
||||
return state_dict
|
||||
|
||||
|
||||
class DDPStrategy(Strategy):
|
||||
"""
|
||||
Strategy for distributed training using torch.distributed.
|
||||
"""
|
||||
|
||||
def __init__(self, seed: int = 42, plugin_initializer: Callable = TorchDDPPlugin) -> None:
|
||||
self.seed = seed
|
||||
super().__init__(plugin_initializer)
|
||||
|
||||
def _try_init_dist(self, force: bool = False) -> None:
|
||||
try:
|
||||
rank = int(os.environ["RANK"])
|
||||
local_rank = int(os.environ["LOCAL_RANK"])
|
||||
world_size = int(os.environ["WORLD_SIZE"])
|
||||
host = os.environ["MASTER_ADDR"]
|
||||
port = int(os.environ["MASTER_PORT"])
|
||||
dist.init_process_group("nccl", init_method=f"tcp://[{host}]:{port}", world_size=world_size, rank=rank)
|
||||
torch.cuda.set_device(local_rank)
|
||||
except KeyError as e:
|
||||
if force:
|
||||
raise RuntimeError(
|
||||
f"Could not find {e} in the torch environment, visit https://www.colossalai.org/ for more information on launching with torch"
|
||||
)
|
||||
except Exception as e:
|
||||
if force:
|
||||
raise e
|
||||
|
||||
def _post_init(self) -> None:
|
||||
assert isinstance(self.plugin, TorchDDPPlugin), f"{type(self).__name__}'s plugin is not initialized properly."
|
||||
|
||||
def setup_distributed(self) -> None:
|
||||
self._try_init_dist(force=True)
|
||||
self.set_seed(self.seed)
|
||||
|
||||
def set_seed(self, seed: int) -> None:
|
||||
random.seed(seed)
|
||||
np.random.seed(seed)
|
||||
torch.manual_seed(seed)
|
||||
|
||||
def setup_dataloader(self, data_buffer: ExperienceBuffer, pin_memory: bool = False) -> DataLoader:
|
||||
return self.plugin.prepare_dataloader(
|
||||
data_buffer,
|
||||
batch_size=data_buffer.sample_batch_size,
|
||||
shuffle=True,
|
||||
drop_last=True,
|
||||
pin_memory=pin_memory,
|
||||
collate_fn=data_buffer.collate_fn,
|
||||
)
|
||||
|
||||
def setup_sampler(self, dataset) -> DistributedSampler:
|
||||
# FIXME(cwher): this is only invoked in train_on_ray, not tested after adapt Boost API.
|
||||
return DistributedSampler(dataset, dist.get_world_size(), dist.get_rank())
|
||||
|
||||
def unwrap_model(self, model: nn.Module) -> nn.Module:
|
||||
assert isinstance(model, TorchDDPModel), "model is not wrapped by TorchDDPModel."
|
||||
return model.unwrap()
|
||||
|
||||
def save_pretrained(
|
||||
self, model: nn.Module, path: str, shard: bool = False, tokenizer: Optional[PreTrainedTokenizerBase] = None
|
||||
) -> None:
|
||||
if dist.get_rank() == 0:
|
||||
unwrapped_model = self.unwrap_model(model)
|
||||
assert isinstance(unwrapped_model, (Actor, Critic, RewardModel))
|
||||
pretrained_model = unwrapped_model.model
|
||||
assert isinstance(pretrained_model, PreTrainedModel)
|
||||
# HACK: only use hf save_pretrained to save config
|
||||
pretrained_model.save_pretrained(path, save_function=lambda *args, **kwargs: None)
|
||||
if tokenizer is not None:
|
||||
tokenizer.save_pretrained(path)
|
||||
|
||||
model_path = os.path.join(path, "pytorch_model.bin")
|
||||
self.save_model(model, model_path, shard=shard)
|
||||
def _replace_keys(model_path: str, replace_fn: Callable):
|
||||
state_dict = torch.load(model_path, map_location="cpu")
|
||||
state_dict = {replace_fn(k): v for k, v in state_dict.items()}
|
||||
torch.save(state_dict, model_path)
|
||||
# FIXME: save_model would add "model." prefix to keys of pytorch_model.bin
|
||||
# HACK: rename keys of pytorch_model.bin
|
||||
if dist.get_rank() == 0:
|
||||
_replace_keys(model_path, lambda k: k.replace("model.", "", 1))
|
||||
|
||||
|
||||
def get_model_state_dict_shard(self, model: nn.Module, **config):
|
||||
# TODO: implement sharding on naive strategy
|
||||
model = self.unwrap_model(model)
|
||||
if "requires_grad_only" in config and config["requires_grad_only"] == True:
|
||||
state_dict = get_grad_required_state_dict(model)
|
||||
else:
|
||||
state_dict = model.state_dict()
|
||||
|
||||
if "shard_size" in config:
|
||||
shard_size = config["shard_size"]
|
||||
accumulate_size = 0
|
||||
state_dict_shard = OrderedDict()
|
||||
for name, param in state_dict.items():
|
||||
state_dict_shard[name] = param
|
||||
accumulate_size += param.numel() * param.element_size()
|
||||
if accumulate_size >= shard_size:
|
||||
accumulate_size = 0
|
||||
yield state_dict_shard
|
||||
state_dict_shard = OrderedDict()
|
||||
if accumulate_size > 0:
|
||||
yield state_dict_shard
|
||||
else:
|
||||
yield state_dict
|
|
@ -1,31 +0,0 @@
|
|||
import math
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
class DistributedSampler:
|
||||
def __init__(self, dataset, num_replicas: int, rank: int) -> None:
|
||||
self.dataset = dataset
|
||||
self.num_replicas = num_replicas
|
||||
self.rank = rank
|
||||
|
||||
if len(self.dataset) % self.num_replicas != 0:
|
||||
self.num_samples = math.ceil(
|
||||
(len(self.dataset) - self.num_replicas) / self.num_replicas # type: ignore[arg-type]
|
||||
)
|
||||
else:
|
||||
self.num_samples = math.ceil(len(self.dataset) / self.num_replicas)
|
||||
|
||||
self.total_size = self.num_samples * self.num_replicas
|
||||
|
||||
indices = list(range(len(self.dataset)))
|
||||
indices = indices[: self.total_size]
|
||||
assert len(indices) == self.total_size
|
||||
# subsample
|
||||
indices = indices[self.rank : self.total_size : self.num_replicas]
|
||||
assert len(indices) == self.num_samples
|
||||
self.indices = indices
|
||||
|
||||
def sample(self, batch_size: int) -> list:
|
||||
sampled_indices = np.random.choice(self.indices, batch_size, replace=False)
|
||||
return [self.dataset[idx] for idx in sampled_indices]
|
|
@ -1,50 +0,0 @@
|
|||
from typing import Any
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from torch.utils._pytree import tree_map
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
|
||||
class CycledDataLoader:
|
||||
"""
|
||||
Why do we need this class?
|
||||
In version 4da324cd60, "prompts = next(iter(self.prompt_dataloader))" is used to sample a batch of prompts/pretrain.
|
||||
However, this may be inefficient due to frequent re-initialization of the dataloader. (re-initialize workers...)
|
||||
NOTE: next(iter(dataloader)) is not equivalent to for batch in dataloader: break, it causes slightly different behavior.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dataloader: DataLoader,
|
||||
) -> None:
|
||||
self.dataloader = dataloader
|
||||
|
||||
self.count = 0
|
||||
self.dataloader_iter = None
|
||||
|
||||
def next(self):
|
||||
# defer initialization
|
||||
if self.dataloader_iter is None:
|
||||
self.dataloader_iter = iter(self.dataloader)
|
||||
|
||||
self.count += 1
|
||||
try:
|
||||
return next(self.dataloader_iter)
|
||||
except StopIteration:
|
||||
self.count = 0
|
||||
self.dataloader_iter = iter(self.dataloader)
|
||||
return next(self.dataloader_iter)
|
||||
|
||||
|
||||
def is_rank_0() -> bool:
|
||||
return not dist.is_initialized() or dist.get_rank() == 0
|
||||
|
||||
|
||||
def to_device(x: Any, device: torch.device) -> Any:
|
||||
def _to(t: Any):
|
||||
if isinstance(t, torch.Tensor):
|
||||
return t.to(device)
|
||||
return t
|
||||
|
||||
return tree_map(_to, x)
|
|
@ -1,409 +0,0 @@
|
|||
# Examples
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Examples](#examples)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Install requirements](#install-requirements)
|
||||
- [Supervised datasets collection](#supervised-datasets-collection)
|
||||
- [Conversation dataset generation](#conversation-dataset-generation)
|
||||
- [Stage1 - Supervised instructs tuning](#stage1---supervised-instructs-tuning)
|
||||
- [Arg List](#arg-list)
|
||||
- [Stage2 - Training reward model](#stage2---training-reward-model)
|
||||
- [Features and tricks in RM training](#features-and-tricks-in-rm-training)
|
||||
- [Experiment result](#experiment-result)
|
||||
- [Arg List](#arg-list-1)
|
||||
- [Stage3 - Training model using prompts with RL](#stage3---training-model-using-prompts-with-rl)
|
||||
- [Arg List](#arg-list-2)
|
||||
- [Inference example - After Stage3](#inference-example---after-stage3)
|
||||
- [Attention](#attention)
|
||||
- [data](#data)
|
||||
- [Support Model](#support-model)
|
||||
- [GPT](#gpt)
|
||||
- [BLOOM](#bloom)
|
||||
- [OPT](#opt)
|
||||
- [LLaMA](#llama)
|
||||
- [Add your own models](#add-your-own-models)
|
||||
- [Actor model](#actor-model)
|
||||
- [Reward model](#reward-model)
|
||||
- [Critic model](#critic-model)
|
||||
|
||||
---
|
||||
|
||||
## Install requirements
|
||||
|
||||
```shell
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## Supervised datasets collection
|
||||
|
||||
We collected 104K bilingual datasets of Chinese and English, and you can find the datasets in this repo
|
||||
[InstructionWild](https://github.com/XueFuzhao/InstructionWild) and in this [file](https://github.com/XueFuzhao/InstructionWild/blob/main/data/README.md).
|
||||
|
||||
Here is how we collected the data
|
||||
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/applications/chat/data-collect.png" width=500/>
|
||||
</p>
|
||||
|
||||
### Conversation dataset generation
|
||||
|
||||
In order to further improve the model's ability to handle multi-turn conversations, we need to include samples with multi-turn conversations in the dataset. However, the samples in InstructWild and Alpaca datasets currently consist of only single-turn conversations, and their dataset organization is not suitable for storing multi-turn conversations. Additionally, after converting the aforementioned datasets, we also need to include multi-turn conversation datasets like ShareGPT, and we should transform them into the training format supported by ColossalChat.
|
||||
|
||||
A sample of conversation dataset should have the following fields:
|
||||
|
||||
- `type` (str, optional): The type of the data sample.
|
||||
- `language` (str, optional): The language of the data sample.
|
||||
- `dataset` (str, optional): The dataset the data sample originates from.
|
||||
- `conversations` (str, compulsory): Conversation content of the data sample.
|
||||
- `id` (int, optional): The ID of the data sample.
|
||||
|
||||
A simple example:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "instruction",
|
||||
"language": "English",
|
||||
"dataset": "Alpaca",
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "Give three tips for staying healthy."
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "1.Eat a balanced diet and make sure to include plenty of fruits and vegetables. \n2. Exercise regularly to keep your body active and strong. \n3. Get enough sleep and maintain a consistent sleep schedule."
|
||||
}
|
||||
],
|
||||
"id": 1
|
||||
}
|
||||
```
|
||||
|
||||
> **NOTE:** Only key `conversations` is compulsary for training and other keys serve as metadata. The length of `conversations` varies.
|
||||
|
||||
You can run the `examples/generate_conversation_dataset.py` to generate a conversation dataset supported by ColossalChat.
|
||||
|
||||
You can use the following cmd to generate conversation dataset.
|
||||
|
||||
```bash
|
||||
python generate_conversation_dataset.py \
|
||||
--dataset "All"
|
||||
--save_path "/path/to/dataset"
|
||||
```
|
||||
|
||||
## Stage1 - Supervised instructs tuning
|
||||
|
||||
Stage1 is supervised instructs fine-tuning, which uses the datasets mentioned earlier to fine-tune the model.
|
||||
[[Stage1 tutorial video]](https://www.youtube.com/watch?v=-qFBZFmOJfg)
|
||||
|
||||
You can run the `examples/train_sft.sh` to start a supervised instructs fine-tuning.
|
||||
|
||||
You can also use the following cmd to start a supervised instructs fine-tuning with your own settings.
|
||||
|
||||
```bash
|
||||
torchrun --standalone --nproc_per_node=4 train_sft.py \
|
||||
--pretrain "/path/to/LLaMa-7B/" \
|
||||
--model 'llama' \
|
||||
--strategy colossalai_zero2 \
|
||||
--save_path /path/to/Coati-7B \
|
||||
--dataset /path/to/data.json \
|
||||
--batch_size 4 \
|
||||
--accumulation_steps 8 \
|
||||
--lr 2e-5 \
|
||||
--max_datasets_size 512 \
|
||||
--max_epochs 1 \
|
||||
--grad_checkpoint
|
||||
```
|
||||
|
||||
**Note**: the supervised dataset follows the following format,
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"instruction": "Provide a list of the top 10 most popular mobile games in Asia",
|
||||
"input": "",
|
||||
"output": "The top 10 most popular mobile games in Asia are:\n1) PUBG Mobile\n2) Pokemon Go\n3) Candy Crush Saga\n4) Free Fire\n5) Clash of Clans\n6) Mario Kart Tour\n7) Arena of Valor\n8) Fantasy Westward Journey\n9) Subway Surfers\n10) ARK Survival Evolved",
|
||||
"id": 0
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
### Arg List
|
||||
|
||||
- `--strategy`: the strategy using for training, choices=['ddp', 'colossalai_gemini', 'colossalai_zero2'], default='colossalai_zero2'
|
||||
- `--model`: model type, choices=['gpt2', 'bloom', 'opt', 'llama'], default='bloom'
|
||||
- `--pretrain`: pretrain model, type=str, default=None
|
||||
- `--max_datasets_size`: the max size of dataset, type=int, default=None
|
||||
- `--save_path`: path to save the model, type=str, default='output'
|
||||
- `--need_optim_ckpt`: whether to save optim ckpt, type=bool, default=False
|
||||
- `--max_epochs`: max epochs for training, type=int, default=3
|
||||
- `--batch_size`: batch size while training, type=int, default=4
|
||||
- `--lora_rank`: low-rank adaptation matrices rank, type=int, default=0
|
||||
- `--grad_checkpoint`: enable gradient checkpointing, type=bool, default=False
|
||||
|
||||
## Stage2 - Training reward model
|
||||
|
||||
We train a reward model in stage 2, which obtains corresponding scores by manually ranking different outputs for the same prompt and supervises the training of the reward model.
|
||||
[[Stage2 tutorial video]](https://www.youtube.com/watch?v=gMx2CApKhuo)
|
||||
|
||||
You can run the `examples/train_rm.sh` to start a reward model training.
|
||||
|
||||
You can also use the following cmd to start training a reward model.
|
||||
|
||||
```bash
|
||||
torchrun --standalone --nproc_per_node=4 train_reward_model.py \
|
||||
--pretrain "/path/to/LLaMa-7B/" \
|
||||
--model 'llama' \
|
||||
--strategy colossalai_zero2 \
|
||||
--loss_fn 'log_exp'\
|
||||
--save_path 'rmstatic.pt' \
|
||||
```
|
||||
|
||||
### Features and tricks in RM training
|
||||
|
||||
- We support [Anthropic/hh-rlhf](https://huggingface.co/datasets/Anthropic/hh-rlhf)and[rm-static](https://huggingface.co/datasets/Dahoas/rm-static) datasets.
|
||||
- We support 2 kinds of loss function named `log_sig`(used by OpenAI) and `log_exp`(used by Anthropic).
|
||||
- We change the loss to `valid_acc` and `pair_dist` to monitor progress during training.
|
||||
- We add special token to the end of the sequence to get better result.
|
||||
- We use cosine-reducing lr-scheduler for RM training.
|
||||
- We set value_head as 1 liner layer and initialize the weight of value_head using N(0,1/(d_model + 1)) distribution.
|
||||
- We train a Bloom-560m reward model for 1 epoch and find the test acc of the model achieve the performance mentions in [Anthropics paper](https://arxiv.org/abs/2204.05862).
|
||||
|
||||
### Experiment result
|
||||
|
||||
Model performance in [Anthropics paper](https://arxiv.org/abs/2204.05862):
|
||||
|
||||
<div align=middle> <img width="512" alt="image" src="https://user-images.githubusercontent.com/70618399/225263321-8d64c3a8-6877-4cc8-9b61-0e1c52d3d94f.png">
|
||||
|
||||
<div align=left>Our training & test result of bloom-560m for 1 epoch:
|
||||
|
||||
<div align=middle> <img width="512" alt="image" src="https://user-images.githubusercontent.com/70618399/225262950-a7f0a686-25de-44ec-98f2-11b83ea86674.png">
|
||||
|
||||
<div align=left>We also train the reward model based on LLaMA-7B, which reaches the ACC of 72.06% after 1 epoch, performing almost the same as Anthropic's best RM.
|
||||
|
||||
### Arg List
|
||||
|
||||
- `--strategy`: the strategy using for training, choices=['ddp', 'colossalai_gemini', 'colossalai_zero2'], default='colossalai_zero2'
|
||||
- `--model`: model type, choices=['gpt2', 'bloom', 'opt', 'llama'], default='bloom'
|
||||
- `--pretrain`: pretrain model, type=str, default=None
|
||||
- `--model_path`: the path of rm model(if continue to train), type=str, default=None
|
||||
- `--save_path`: path to save the model, type=str, default='output'
|
||||
- `--need_optim_ckpt`: whether to save optim ckpt, type=bool, default=False
|
||||
- `--max_epochs`: max epochs for training, type=int, default=3
|
||||
- `--dataset`: dataset name, type=str, choices=['Anthropic/hh-rlhf', 'Dahoas/rm-static']
|
||||
- `--subset`: subset of the dataset, type=str, default=None
|
||||
- `--batch_size`: batch size while training, type=int, default=4
|
||||
- `--lora_rank`: low-rank adaptation matrices rank, type=int, default=0
|
||||
- `--loss_func`: which kind of loss function, choices=['log_sig', 'log_exp']
|
||||
- `--max_len`: max sentence length for generation, type=int, default=512
|
||||
|
||||
## Stage3 - Training model using prompts with RL
|
||||
|
||||
Stage3 uses reinforcement learning algorithm, which is the most complex part of the training process, as shown below:
|
||||
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/applications/chat/stage-3.jpeg" width=800/>
|
||||
</p>
|
||||
|
||||
You can run the `examples/train_prompts.sh` to start PPO training.
|
||||
|
||||
You can also use the cmd following to start PPO training.
|
||||
[[Stage3 tutorial video]](https://www.youtube.com/watch?v=Z8wwSHxPL9g)
|
||||
|
||||
```bash
|
||||
torchrun --standalone --nproc_per_node=4 train_prompts.py \
|
||||
--pretrain "/path/to/LLaMa-7B/" \
|
||||
--model 'llama' \
|
||||
--strategy colossalai_zero2 \
|
||||
--prompt_dataset /path/to/your/prompt_dataset \
|
||||
--pretrain_dataset /path/to/your/pretrain_dataset \
|
||||
--rm_pretrain /your/pretrain/rm/definition \
|
||||
--rm_path /your/rm/model/path
|
||||
```
|
||||
|
||||
Prompt dataset: the instruction dataset mentioned in the above figure which includes the instructions, e.g. you can use the [script](https://github.com/hpcaitech/ColossalAI/tree/main/applications/Chat/examples/generate_prompt_dataset.py) which samples `instinwild_en.json` or `instinwild_ch.json` in [InstructionWild](https://github.com/XueFuzhao/InstructionWild/tree/main/data#instructwild-data) to generate the prompt dataset.
|
||||
Pretrain dataset: the pretrain dataset including the instruction and corresponding response, e.g. you can use the [InstructWild Data](https://github.com/XueFuzhao/InstructionWild/tree/main/data) in stage 1 supervised instructs tuning.
|
||||
|
||||
**Note**: the required datasets follow the following format,
|
||||
|
||||
- `pretrain dataset`
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"instruction": "Provide a list of the top 10 most popular mobile games in Asia",
|
||||
"input": "",
|
||||
"output": "The top 10 most popular mobile games in Asia are:\n1) PUBG Mobile\n2) Pokemon Go\n3) Candy Crush Saga\n4) Free Fire\n5) Clash of Clans\n6) Mario Kart Tour\n7) Arena of Valor\n8) Fantasy Westward Journey\n9) Subway Surfers\n10) ARK Survival Evolved",
|
||||
"id": 0
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
- `prompt dataset`
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"instruction": "Edit this paragraph to make it more concise: \"Yesterday, I went to the store and bought some things. Then, I came home and put them away. After that, I went for a walk and met some friends.\"",
|
||||
"id": 0
|
||||
},
|
||||
{
|
||||
"instruction": "Write a descriptive paragraph about a memorable vacation you went on",
|
||||
"id": 1
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
### Arg List
|
||||
|
||||
- `--strategy`: the strategy using for training, choices=['ddp', 'colossalai_gemini', 'colossalai_zero2'], default='colossalai_zero2'
|
||||
- `--model`: model type of actor, choices=['gpt2', 'bloom', 'opt', 'llama'], default='bloom'
|
||||
- `--pretrain`: pretrain model, type=str, default=None
|
||||
- `--rm_model`: reward model type, type=str, choices=['gpt2', 'bloom', 'opt', 'llama'], default=None
|
||||
- `--rm_pretrain`: pretrain model for reward model, type=str, default=None
|
||||
- `--rm_path`: the path of rm model, type=str, default=None
|
||||
- `--save_path`: path to save the model, type=str, default='output'
|
||||
- `--prompt_dataset`: path of the prompt dataset, type=str, default=None
|
||||
- `--pretrain_dataset`: path of the ptx dataset, type=str, default=None
|
||||
- `--need_optim_ckpt`: whether to save optim ckpt, type=bool, default=False
|
||||
- `--num_episodes`: num of episodes for training, type=int, default=10
|
||||
- `--num_update_steps`: number of steps to update policy per episode, type=int
|
||||
- `--num_collect_steps`: number of steps to collect experience per episode, type=int
|
||||
- `--train_batch_size`: batch size while training, type=int, default=8
|
||||
- `--ptx_batch_size`: batch size to compute ptx loss, type=int, default=1
|
||||
- `--experience_batch_size`: batch size to make experience, type=int, default=8
|
||||
- `--lora_rank`: low-rank adaptation matrices rank, type=int, default=0
|
||||
- `--kl_coef`: kl_coef using for computing reward, type=float, default=0.1
|
||||
- `--ptx_coef`: ptx_coef using for computing policy loss, type=float, default=0.9
|
||||
|
||||
## Inference example - After Stage3
|
||||
|
||||
We support different inference options, including int8 and int4 quantization.
|
||||
For details, see [`inference/`](https://github.com/hpcaitech/ColossalAI/tree/main/applications/Chat/inference).
|
||||
|
||||
## Attention
|
||||
|
||||
The examples are demos for the whole training process.You need to change the hyper-parameters to reach great performance.
|
||||
|
||||
#### data
|
||||
|
||||
- [x] [rm-static](https://huggingface.co/datasets/Dahoas/rm-static)
|
||||
- [x] [hh-rlhf](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
||||
- [ ] [openai/summarize_from_feedback](https://huggingface.co/datasets/openai/summarize_from_feedback)
|
||||
- [ ] [openai/webgpt_comparisons](https://huggingface.co/datasets/openai/webgpt_comparisons)
|
||||
- [ ] [Dahoas/instruct-synthetic-prompt-responses](https://huggingface.co/datasets/Dahoas/instruct-synthetic-prompt-responses)
|
||||
|
||||
## Support Model
|
||||
|
||||
### GPT
|
||||
|
||||
- [x] GPT2-S (s)
|
||||
- [x] GPT2-M (m)
|
||||
- [x] GPT2-L (l)
|
||||
- [x] GPT2-XL (xl)
|
||||
- [x] GPT2-4B (4b)
|
||||
- [ ] GPT2-6B (6b)
|
||||
|
||||
### BLOOM
|
||||
|
||||
- [x] [BLOOM-560m](https://huggingface.co/bigscience/bloom-560m)
|
||||
- [x] [BLOOM-1b1](https://huggingface.co/bigscience/bloom-1b1)
|
||||
- [x] [BLOOM-3b](https://huggingface.co/bigscience/bloom-3b)
|
||||
- [x] [BLOOM-7b](https://huggingface.co/bigscience/bloom-7b1)
|
||||
- [ ] [BLOOM-175b](https://huggingface.co/bigscience/bloom)
|
||||
|
||||
### OPT
|
||||
|
||||
- [x] [OPT-125M](https://huggingface.co/facebook/opt-125m)
|
||||
- [x] [OPT-350M](https://huggingface.co/facebook/opt-350m)
|
||||
- [x] [OPT-1.3B](https://huggingface.co/facebook/opt-1.3b)
|
||||
- [x] [OPT-2.7B](https://huggingface.co/facebook/opt-2.7b)
|
||||
- [x] [OPT-6.7B](https://huggingface.co/facebook/opt-6.7b)
|
||||
- [ ] [OPT-13B](https://huggingface.co/facebook/opt-13b)
|
||||
- [ ] [OPT-30B](https://huggingface.co/facebook/opt-30b)
|
||||
|
||||
### [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md)
|
||||
|
||||
- [x] LLaMA-7B
|
||||
- [x] LLaMA-13B
|
||||
- [ ] LLaMA-33B
|
||||
- [ ] LLaMA-65B
|
||||
|
||||
## Add your own models
|
||||
|
||||
If you want to support your own model in Coati, please refer the pull request for RoBERTa support as an example --[[chatgpt] add pre-trained model RoBERTa for RLHF stage 2 & 3](https://github.com/hpcaitech/ColossalAI/pull/3223), and submit a PR to us.
|
||||
|
||||
You should complete the implementation of four model classes, including Reward model, Critic model, LM model, Actor model
|
||||
|
||||
here are some example code for a NewModel named `Coati`.
|
||||
if it is supported in huggingface [transformers](https://github.com/huggingface/transformers), you can load it by `from_pretrained`, o
|
||||
r you can build your own model by yourself.
|
||||
|
||||
### Actor model
|
||||
|
||||
```python
|
||||
from ..base import Actor
|
||||
from transformers.models.coati import CoatiModel
|
||||
|
||||
class CoatiActor(Actor):
|
||||
def __init__(self,
|
||||
pretrained: Optional[str] = None,
|
||||
checkpoint: bool = False,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = 'none') -> None:
|
||||
if pretrained is not None:
|
||||
model = CoatiModel.from_pretrained(pretrained)
|
||||
else:
|
||||
model = build_model() # load your own model if it is not support in transformers
|
||||
|
||||
super().__init__(model, lora_rank, lora_train_bias)
|
||||
```
|
||||
|
||||
### Reward model
|
||||
|
||||
```python
|
||||
from ..base import RewardModel
|
||||
from transformers.models.coati import CoatiModel
|
||||
|
||||
class CoatiRM(RewardModel):
|
||||
|
||||
def __init__(self,
|
||||
pretrained: Optional[str] = None,
|
||||
checkpoint: bool = False,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = 'none') -> None:
|
||||
if pretrained is not None:
|
||||
model = CoatiModel.from_pretrained(pretrained)
|
||||
else:
|
||||
model = build_model() # load your own model if it is not support in transformers
|
||||
|
||||
value_head = nn.Linear(model.config.n_embd, 1)
|
||||
value_head.weight.data.normal_(mean=0.0, std=1 / (model.config.n_embd + 1))
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias)
|
||||
```
|
||||
|
||||
### Critic model
|
||||
|
||||
```python
|
||||
from ..base import Critic
|
||||
from transformers.models.coati import CoatiModel
|
||||
|
||||
class CoatiCritic(Critic):
|
||||
def __init__(self,
|
||||
pretrained: Optional[str] = None,
|
||||
checkpoint: bool = False,
|
||||
lora_rank: int = 0,
|
||||
lora_train_bias: str = 'none') -> None:
|
||||
if pretrained is not None:
|
||||
model = CoatiModel.from_pretrained(pretrained)
|
||||
else:
|
||||
model = build_model() # load your own model if it is not support in transformers
|
||||
|
||||
value_head = nn.Linear(model.config.n_embd, 1)
|
||||
value_head.weight.data.normal_(mean=0.0, std=1 / (model.config.n_embd + 1))
|
||||
super().__init__(model, value_head, lora_rank, lora_train_bias)
|
||||
```
|
|
@ -1,79 +0,0 @@
|
|||
import argparse
|
||||
import dataclasses
|
||||
import os
|
||||
import parser
|
||||
from typing import List
|
||||
|
||||
import tqdm
|
||||
from coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic
|
||||
from coati.models.gpt import GPTRM, GPTActor, GPTCritic
|
||||
from coati.models.opt import OPTRM, OPTActor, OPTCritic
|
||||
from huggingface_hub import hf_hub_download, snapshot_download
|
||||
from transformers import AutoConfig, AutoTokenizer, BloomConfig, BloomTokenizerFast, GPT2Config, GPT2Tokenizer
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class HFRepoFiles:
|
||||
repo_id: str
|
||||
files: List[str]
|
||||
|
||||
def download(self, dir_path: str):
|
||||
for file in self.files:
|
||||
file_path = hf_hub_download(self.repo_id, file, local_dir=dir_path)
|
||||
|
||||
def download_all(self):
|
||||
snapshot_download(self.repo_id)
|
||||
|
||||
|
||||
def test_init(model: str, dir_path: str):
|
||||
if model == "gpt2":
|
||||
config = GPT2Config.from_pretrained(dir_path)
|
||||
actor = GPTActor(config=config)
|
||||
critic = GPTCritic(config=config)
|
||||
reward_model = GPTRM(config=config)
|
||||
GPT2Tokenizer.from_pretrained(dir_path)
|
||||
elif model == "bloom":
|
||||
config = BloomConfig.from_pretrained(dir_path)
|
||||
actor = BLOOMActor(config=config)
|
||||
critic = BLOOMCritic(config=config)
|
||||
reward_model = BLOOMRM(config=config)
|
||||
BloomTokenizerFast.from_pretrained(dir_path)
|
||||
elif model == "opt":
|
||||
config = AutoConfig.from_pretrained(dir_path)
|
||||
actor = OPTActor(config=config)
|
||||
critic = OPTCritic(config=config)
|
||||
reward_model = OPTRM(config=config)
|
||||
AutoTokenizer.from_pretrained(dir_path)
|
||||
else:
|
||||
raise NotImplementedError(f"Model {model} not implemented")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--model-dir", type=str, default="test_models")
|
||||
parser.add_argument("--config-only", default=False, action="store_true")
|
||||
args = parser.parse_args()
|
||||
|
||||
if os.path.exists(args.model_dir):
|
||||
print(f"[INFO]: {args.model_dir} already exists")
|
||||
exit(0)
|
||||
|
||||
repo_list = {
|
||||
"gpt2": HFRepoFiles(repo_id="gpt2", files=["config.json", "tokenizer.json", "vocab.json", "merges.txt"]),
|
||||
"bloom": HFRepoFiles(
|
||||
repo_id="bigscience/bloom-560m", files=["config.json", "tokenizer.json", "tokenizer_config.json"]
|
||||
),
|
||||
"opt": HFRepoFiles(
|
||||
repo_id="facebook/opt-350m", files=["config.json", "tokenizer_config.json", "vocab.json", "merges.txt"]
|
||||
),
|
||||
}
|
||||
|
||||
os.mkdir(args.model_dir)
|
||||
for model_name in tqdm.tqdm(repo_list):
|
||||
dir_path = os.path.join(args.model_dir, model_name)
|
||||
if args.config_only:
|
||||
os.mkdir(dir_path)
|
||||
repo_list[model_name].download(dir_path)
|
||||
else:
|
||||
repo_list[model_name].download_all()
|
||||
test_init(model_name, dir_path)
|
|
@ -1,82 +0,0 @@
|
|||
import argparse
|
||||
import json
|
||||
|
||||
from datasets import load_dataset
|
||||
|
||||
|
||||
def generate_alpaca():
|
||||
# We can convert dataset with the same format("instruction", "input", "output") as Alpaca into a one-round conversation.
|
||||
conversation_dataset = []
|
||||
dataset = load_dataset("tatsu-lab/alpaca", split="train")
|
||||
|
||||
instructions = dataset["instruction"]
|
||||
inputs = dataset["input"]
|
||||
outputs = dataset["output"]
|
||||
|
||||
assert len(instructions) == len(inputs) == len(outputs)
|
||||
|
||||
for idx in range(len(instructions)):
|
||||
human_utterance = instructions[idx] + "\n\n" + inputs[idx] if inputs[idx] else instructions[idx]
|
||||
human = {"from": "human", "value": human_utterance}
|
||||
|
||||
gpt_utterance = outputs[idx]
|
||||
gpt = {"from": "gpt", "value": gpt_utterance}
|
||||
|
||||
conversation = dict(type="instruction", language="English", dataset="Alpaca", conversations=[human, gpt])
|
||||
conversation_dataset.append(conversation)
|
||||
|
||||
return conversation_dataset
|
||||
|
||||
|
||||
def generate_sharegpt():
|
||||
# ShareGPT data requires less processing.
|
||||
conversation_dataset = []
|
||||
dataset = load_dataset(
|
||||
"anon8231489123/ShareGPT_Vicuna_unfiltered",
|
||||
data_files="ShareGPT_V3_unfiltered_cleaned_split_no_imsorry.json",
|
||||
split="train",
|
||||
)
|
||||
|
||||
conversations = dataset["conversations"]
|
||||
|
||||
for idx in range(len(conversations)):
|
||||
for conv in conversations[idx]:
|
||||
# We don't need markdown and text value.
|
||||
del conv["markdown"]
|
||||
del conv["text"]
|
||||
|
||||
conversation = dict(
|
||||
type="conversation", language="Multilingual", dataset="ShareGPT", conversations=conversations[idx]
|
||||
)
|
||||
conversation_dataset.append(conversation)
|
||||
|
||||
return conversation_dataset
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--dataset",
|
||||
type=str,
|
||||
default="All",
|
||||
choices=["Alpaca", "ShareGPT", "All"],
|
||||
help="which dataset to convert, All will combine Alpaca and ShareGPT",
|
||||
)
|
||||
parser.add_argument("--save_path", type=str, default="dataset.json", help="path to save the converted dataset")
|
||||
args = parser.parse_args()
|
||||
|
||||
conversation_dataset = []
|
||||
|
||||
if args.dataset == "Alpaca":
|
||||
conversation_dataset.extend(generate_alpaca())
|
||||
elif args.dataset == "ShareGPT":
|
||||
conversation_dataset.extend(generate_sharegpt())
|
||||
else:
|
||||
conversation_dataset.extend(generate_alpaca())
|
||||
conversation_dataset.extend(generate_sharegpt())
|
||||
|
||||
for idx, sample in enumerate(conversation_dataset):
|
||||
sample["id"] = idx + 1
|
||||
|
||||
with open(args.save_path, mode="w") as f:
|
||||
json.dump(conversation_dataset, f, indent=4, default=str, ensure_ascii=False)
|
|
@ -1,27 +0,0 @@
|
|||
import argparse
|
||||
import json
|
||||
import random
|
||||
|
||||
random.seed(42)
|
||||
|
||||
|
||||
def sample(args):
|
||||
with open(args.dataset_path, mode="r") as f:
|
||||
dataset_list = json.load(f)
|
||||
|
||||
sampled_dataset = [
|
||||
{"instruction": sample["instruction"], "id": idx}
|
||||
for idx, sample in enumerate(random.sample(dataset_list, args.sample_size))
|
||||
]
|
||||
|
||||
with open(args.save_path, mode="w") as f:
|
||||
json.dump(sampled_dataset, f, indent=4, default=str, ensure_ascii=False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--dataset_path", type=str, default=None, required=True, help="path to the pretrain dataset")
|
||||
parser.add_argument("--save_path", type=str, default="prompt.json", help="path to save the prompt dataset")
|
||||
parser.add_argument("--sample_size", type=int, default=16384, help="size of the prompt dataset")
|
||||
args = parser.parse_args()
|
||||
sample(args)
|
|
@ -1,73 +0,0 @@
|
|||
import argparse
|
||||
|
||||
import torch
|
||||
from coati.models.bloom import BLOOMActor
|
||||
from coati.models.generation import generate
|
||||
from coati.models.gpt import GPTActor
|
||||
from coati.models.llama import LlamaActor
|
||||
from coati.models.opt import OPTActor
|
||||
from transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer
|
||||
|
||||
|
||||
def eval(args):
|
||||
# configure model
|
||||
if args.model == "gpt2":
|
||||
actor = GPTActor(pretrained=args.pretrain)
|
||||
elif args.model == "bloom":
|
||||
actor = BLOOMActor(pretrained=args.pretrain)
|
||||
elif args.model == "opt":
|
||||
actor = OPTActor(pretrained=args.pretrain)
|
||||
elif args.model == "llama":
|
||||
actor = LlamaActor(pretrained=args.pretrain)
|
||||
else:
|
||||
raise ValueError(f'Unsupported model "{args.model}"')
|
||||
|
||||
actor.to(torch.cuda.current_device())
|
||||
if args.model_path is not None:
|
||||
state_dict = torch.load(args.model_path)
|
||||
actor.load_state_dict(state_dict)
|
||||
|
||||
# configure tokenizer
|
||||
if args.model == "gpt2":
|
||||
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "bloom":
|
||||
tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "opt":
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "llama":
|
||||
tokenizer = LlamaTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
|
||||
tokenizer.eos_token = "</s>"
|
||||
tokenizer.pad_token = tokenizer.unk_token
|
||||
else:
|
||||
raise ValueError(f'Unsupported model "{args.model}"')
|
||||
|
||||
actor.eval()
|
||||
tokenizer.padding_side = "left"
|
||||
input_ids = tokenizer.encode(args.input, return_tensors="pt").to(torch.cuda.current_device())
|
||||
outputs = generate(
|
||||
actor,
|
||||
input_ids,
|
||||
tokenizer=tokenizer,
|
||||
max_length=args.max_length,
|
||||
do_sample=True,
|
||||
top_k=50,
|
||||
top_p=0.95,
|
||||
num_return_sequences=1,
|
||||
)
|
||||
output = tokenizer.batch_decode(outputs[0], skip_special_tokens=True)
|
||||
print(f"[Output]: {''.join(output)}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--model", default="gpt2", choices=["gpt2", "bloom", "opt", "llama"])
|
||||
# We suggest to use the pretrained model from HuggingFace, use pretrain to configure model
|
||||
parser.add_argument("--pretrain", type=str, default=None)
|
||||
parser.add_argument("--model_path", type=str, default=None)
|
||||
parser.add_argument("--input", type=str, default="Question: How are you ? Answer:")
|
||||
parser.add_argument("--max_length", type=int, default=100)
|
||||
args = parser.parse_args()
|
||||
eval(args)
|
|
@ -1,249 +0,0 @@
|
|||
import argparse
|
||||
import warnings
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from coati.dataset import PromptDataset, SupervisedDataset
|
||||
from coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic
|
||||
from coati.models.gpt import GPTRM, GPTActor, GPTCritic
|
||||
from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM
|
||||
from coati.models.opt import OPTRM, OPTActor, OPTCritic
|
||||
from coati.trainer import PPOTrainer
|
||||
from coati.trainer.strategies import DDPStrategy, GeminiStrategy, LowLevelZeroStrategy
|
||||
from torch.optim import Adam
|
||||
from torch.utils.data import DataLoader
|
||||
from torch.utils.data.distributed import DistributedSampler
|
||||
from transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer
|
||||
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
|
||||
|
||||
def main(args):
|
||||
# configure strategy
|
||||
if args.strategy == "ddp":
|
||||
strategy = DDPStrategy()
|
||||
elif args.strategy == "colossalai_gemini":
|
||||
strategy = GeminiStrategy(placement_policy="static", initial_scale=2**5)
|
||||
elif args.strategy == "colossalai_zero2":
|
||||
strategy = LowLevelZeroStrategy(stage=2, placement_policy="cuda")
|
||||
else:
|
||||
raise ValueError(f'Unsupported strategy "{args.strategy}"')
|
||||
|
||||
if args.rm_path is not None:
|
||||
warnings.warn("LoRA weights should be merged with the model weights")
|
||||
state_dict = torch.load(args.rm_path, map_location="cpu")
|
||||
|
||||
if args.lora_rank > 0:
|
||||
warnings.warn("Lora is not supported yet.")
|
||||
args.lora_rank = 0
|
||||
|
||||
with strategy.model_init_context():
|
||||
# configure model
|
||||
if args.model == "gpt2":
|
||||
initial_model = GPTActor(pretrained=args.pretrain)
|
||||
elif args.model == "bloom":
|
||||
initial_model = BLOOMActor(pretrained=args.pretrain)
|
||||
elif args.model == "opt":
|
||||
initial_model = OPTActor(pretrained=args.pretrain)
|
||||
elif args.model == "llama":
|
||||
initial_model = LlamaActor(pretrained=args.pretrain)
|
||||
else:
|
||||
raise ValueError(f'Unsupported actor model "{args.model}"')
|
||||
|
||||
if args.rm_model is None:
|
||||
rm_model_name = args.model
|
||||
else:
|
||||
rm_model_name = args.rm_model
|
||||
|
||||
if rm_model_name == "gpt2":
|
||||
reward_model = GPTRM(pretrained=args.rm_pretrain, lora_rank=args.lora_rank)
|
||||
elif rm_model_name == "bloom":
|
||||
reward_model = BLOOMRM(pretrained=args.rm_pretrain, lora_rank=args.lora_rank)
|
||||
elif rm_model_name == "opt":
|
||||
reward_model = OPTRM(pretrained=args.rm_pretrain, lora_rank=args.lora_rank)
|
||||
elif rm_model_name == "llama":
|
||||
reward_model = LlamaRM(pretrained=args.rm_pretrain, lora_rank=args.lora_rank)
|
||||
else:
|
||||
raise ValueError(f'Unsupported reward model "{rm_model_name}"')
|
||||
|
||||
if args.rm_path is not None:
|
||||
reward_model.load_state_dict(state_dict, strict=False)
|
||||
|
||||
initial_model.to(torch.bfloat16).to(torch.cuda.current_device())
|
||||
reward_model.to(torch.bfloat16).to(torch.cuda.current_device())
|
||||
|
||||
if args.model == "gpt2":
|
||||
actor = GPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
|
||||
elif args.model == "bloom":
|
||||
actor = BLOOMActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
|
||||
elif args.model == "opt":
|
||||
actor = OPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
|
||||
elif args.model == "llama":
|
||||
actor = LlamaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
|
||||
else:
|
||||
raise ValueError(f'Unsupported actor model "{args.model}"')
|
||||
|
||||
if rm_model_name == "gpt2":
|
||||
critic = GPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank)
|
||||
elif rm_model_name == "bloom":
|
||||
critic = BLOOMCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank)
|
||||
elif rm_model_name == "opt":
|
||||
critic = OPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank)
|
||||
elif rm_model_name == "llama":
|
||||
critic = LlamaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank)
|
||||
else:
|
||||
raise ValueError(f'Unsupported reward model "{rm_model_name}"')
|
||||
|
||||
if args.rm_path is not None:
|
||||
critic.load_state_dict(state_dict, strict=False)
|
||||
del state_dict
|
||||
|
||||
actor.to(torch.bfloat16).to(torch.cuda.current_device())
|
||||
critic.to(torch.bfloat16).to(torch.cuda.current_device())
|
||||
|
||||
# configure optimizer
|
||||
if args.strategy.startswith("colossalai"):
|
||||
actor_optim = HybridAdam(actor.parameters(), lr=args.lr)
|
||||
critic_optim = HybridAdam(critic.parameters(), lr=args.lr)
|
||||
else:
|
||||
actor_optim = Adam(actor.parameters(), lr=args.lr)
|
||||
critic_optim = Adam(critic.parameters(), lr=args.lr)
|
||||
|
||||
# configure tokenizer
|
||||
if args.model == "gpt2":
|
||||
tokenizer = GPT2Tokenizer.from_pretrained("gpt2" if args.tokenizer is None else args.tokenizer)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "bloom":
|
||||
tokenizer = BloomTokenizerFast.from_pretrained(
|
||||
"bigscience/bloom-560m" if args.tokenizer is None else args.tokenizer
|
||||
)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "opt":
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" if args.tokenizer is None else args.tokenizer)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "llama":
|
||||
tokenizer = LlamaTokenizer.from_pretrained(
|
||||
"hf-internal-testing/llama-tokenizer" if args.tokenizer is None else args.tokenizer
|
||||
)
|
||||
tokenizer.eos_token = "</s>"
|
||||
tokenizer.pad_token = tokenizer.unk_token
|
||||
else:
|
||||
raise ValueError(f'Unsupported model "{args.model}"')
|
||||
# NOTE: generate() requires padding_side to be "left"
|
||||
tokenizer.padding_side = "left"
|
||||
|
||||
prompt_dataset = PromptDataset(
|
||||
tokenizer=tokenizer,
|
||||
data_path=args.prompt_dataset,
|
||||
max_datasets_size=args.max_datasets_size,
|
||||
max_length=args.max_input_len,
|
||||
)
|
||||
if dist.is_initialized() and dist.get_world_size() > 1:
|
||||
prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)
|
||||
else:
|
||||
prompt_sampler = None
|
||||
prompt_dataloader = DataLoader(
|
||||
prompt_dataset, shuffle=(prompt_sampler is None), sampler=prompt_sampler, batch_size=args.experience_batch_size
|
||||
)
|
||||
|
||||
pretrain_dataset = SupervisedDataset(
|
||||
tokenizer=tokenizer,
|
||||
data_path=args.pretrain_dataset,
|
||||
max_datasets_size=args.max_datasets_size,
|
||||
max_length=args.max_input_len,
|
||||
)
|
||||
if dist.is_initialized() and dist.get_world_size() > 1:
|
||||
pretrain_sampler = DistributedSampler(pretrain_dataset, shuffle=True, seed=42, drop_last=True)
|
||||
else:
|
||||
pretrain_sampler = None
|
||||
pretrain_dataloader = DataLoader(
|
||||
pretrain_dataset, shuffle=(pretrain_sampler is None), sampler=pretrain_sampler, batch_size=args.ptx_batch_size
|
||||
)
|
||||
|
||||
# NOTE: For small models like opt-1.3b, reward model and initial model are not required to be parallelized.
|
||||
(actor, actor_optim), (critic, critic_optim), reward_model, initial_model = strategy.prepare(
|
||||
(actor, actor_optim), (critic, critic_optim), reward_model, initial_model
|
||||
)
|
||||
|
||||
# configure trainer
|
||||
trainer = PPOTrainer(
|
||||
strategy,
|
||||
actor,
|
||||
critic,
|
||||
reward_model,
|
||||
initial_model,
|
||||
actor_optim,
|
||||
critic_optim,
|
||||
tokenizer=tokenizer,
|
||||
kl_coef=args.kl_coef,
|
||||
ptx_coef=args.ptx_coef,
|
||||
train_batch_size=args.train_batch_size,
|
||||
max_length=args.max_seq_len,
|
||||
use_cache=True,
|
||||
do_sample=True,
|
||||
temperature=1.0,
|
||||
top_k=50,
|
||||
offload_inference_models=args.strategy != "colossalai_gemini",
|
||||
)
|
||||
|
||||
trainer.fit(
|
||||
num_episodes=args.num_episodes,
|
||||
num_collect_steps=args.num_collect_steps,
|
||||
num_update_steps=args.num_update_steps,
|
||||
prompt_dataloader=prompt_dataloader,
|
||||
pretrain_dataloader=pretrain_dataloader,
|
||||
log_dir=args.log_dir,
|
||||
use_wandb=args.use_wandb,
|
||||
)
|
||||
|
||||
if args.lora_rank > 0 and args.merge_lora_weights:
|
||||
from coati.models.lora import LORA_MANAGER
|
||||
|
||||
# NOTE: set model to eval to merge LoRA weights
|
||||
LORA_MANAGER.merge_weights = True
|
||||
actor.eval()
|
||||
# save model checkpoint after fitting
|
||||
strategy.save_pretrained(actor, path=args.save_path)
|
||||
# save optimizer checkpoint on all ranks
|
||||
if args.need_optim_ckpt:
|
||||
strategy.save_optimizer(
|
||||
actor_optim, "actor_optim_checkpoint_prompts_%d.pt" % (torch.cuda.current_device()), only_rank0=False
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--prompt_dataset", type=str, default=None, help="path to the prompt dataset")
|
||||
parser.add_argument("--pretrain_dataset", type=str, default=None, help="path to the pretrained dataset")
|
||||
parser.add_argument("--max_datasets_size", type=int, default=50000)
|
||||
parser.add_argument(
|
||||
"--strategy",
|
||||
choices=["ddp", "colossalai_gemini", "colossalai_zero2"],
|
||||
default="colossalai_zero2",
|
||||
help="strategy to use",
|
||||
)
|
||||
parser.add_argument("--model", default="gpt2", choices=["gpt2", "bloom", "opt", "llama"])
|
||||
parser.add_argument("--tokenizer", type=str, default=None)
|
||||
parser.add_argument("--pretrain", type=str, default=None)
|
||||
parser.add_argument("--rm_model", default=None, choices=["gpt2", "bloom", "opt", "llama"])
|
||||
parser.add_argument("--rm_path", type=str, default=None)
|
||||
parser.add_argument("--rm_pretrain", type=str, default=None)
|
||||
parser.add_argument("--save_path", type=str, default="actor_checkpoint_prompts")
|
||||
parser.add_argument("--need_optim_ckpt", type=bool, default=False)
|
||||
parser.add_argument("--num_episodes", type=int, default=10)
|
||||
parser.add_argument("--num_collect_steps", type=int, default=10)
|
||||
parser.add_argument("--num_update_steps", type=int, default=5)
|
||||
parser.add_argument("--train_batch_size", type=int, default=8)
|
||||
parser.add_argument("--ptx_batch_size", type=int, default=1)
|
||||
parser.add_argument("--experience_batch_size", type=int, default=8)
|
||||
parser.add_argument("--lora_rank", type=int, default=0, help="low-rank adaptation matrices rank")
|
||||
parser.add_argument("--merge_lora_weights", type=bool, default=True)
|
||||
parser.add_argument("--lr", type=float, default=1e-7)
|
||||
parser.add_argument("--kl_coef", type=float, default=0.1)
|
||||
parser.add_argument("--ptx_coef", type=float, default=0.9)
|
||||
parser.add_argument("--max_input_len", type=int, default=96)
|
||||
parser.add_argument("--max_seq_len", type=int, default=128)
|
||||
parser.add_argument("--log_dir", default="logs", type=str)
|
||||
parser.add_argument("--use_wandb", default=False, action="store_true")
|
||||
args = parser.parse_args()
|
||||
main(args)
|
|
@ -1,25 +0,0 @@
|
|||
set_n_least_used_CUDA_VISIBLE_DEVICES() {
|
||||
local n=${1:-"9999"}
|
||||
echo "GPU Memory Usage:"
|
||||
local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv |
|
||||
tail -n +2 |
|
||||
nl -v 0 |
|
||||
tee /dev/tty |
|
||||
sort -g -k 2 |
|
||||
awk '{print $1}' |
|
||||
head -n $n)
|
||||
export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g')
|
||||
echo "Now CUDA_VISIBLE_DEVICES is set to:"
|
||||
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
|
||||
}
|
||||
|
||||
set_n_least_used_CUDA_VISIBLE_DEVICES 2
|
||||
|
||||
# torchrun --standalone --nproc_per_node=2 train_prompts.py prompts.csv --strategy colossalai_zero2
|
||||
|
||||
torchrun --standalone --nproc_per_node=2 train_prompts.py \
|
||||
--pretrain_dataset /path/to/data.json \
|
||||
--prompt_dataset /path/to/data.json \
|
||||
--strategy colossalai_zero2 \
|
||||
--num_episodes 1 --num_collect_steps 2 --num_update_steps 1 \
|
||||
--train_batch_size 2
|
|
@ -1,208 +0,0 @@
|
|||
import argparse
|
||||
import warnings
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from coati.dataset import HhRlhfDataset, RmStaticDataset
|
||||
from coati.models import LogExpLoss, LogSigLoss
|
||||
from coati.models.bloom import BLOOMRM
|
||||
from coati.models.gpt import GPTRM
|
||||
from coati.models.llama import LlamaRM
|
||||
from coati.models.opt import OPTRM
|
||||
from coati.trainer import RewardModelTrainer
|
||||
from coati.trainer.strategies import DDPStrategy, GeminiStrategy, LowLevelZeroStrategy
|
||||
from datasets import load_dataset
|
||||
from torch.optim import Adam
|
||||
from torch.optim.lr_scheduler import CosineAnnealingLR
|
||||
from torch.utils.data import DataLoader
|
||||
from torch.utils.data.distributed import DistributedSampler
|
||||
from transformers import AutoTokenizer, BloomTokenizerFast, LlamaTokenizer
|
||||
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
|
||||
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
|
||||
|
||||
def train(args):
|
||||
# configure strategy
|
||||
if args.strategy == "ddp":
|
||||
strategy = DDPStrategy()
|
||||
elif args.strategy == "colossalai_gemini":
|
||||
strategy = GeminiStrategy(placement_policy="auto")
|
||||
elif args.strategy == "colossalai_zero2":
|
||||
strategy = LowLevelZeroStrategy(stage=2, placement_policy="cuda")
|
||||
else:
|
||||
raise ValueError(f'Unsupported strategy "{args.strategy}"')
|
||||
|
||||
# configure model
|
||||
if args.lora_rank > 0:
|
||||
warnings.warn("Lora is not supported yet.")
|
||||
args.lora_rank = 0
|
||||
|
||||
with strategy.model_init_context():
|
||||
if args.model == "bloom":
|
||||
model = BLOOMRM(pretrained=args.pretrain, lora_rank=args.lora_rank)
|
||||
elif args.model == "opt":
|
||||
model = OPTRM(pretrained=args.pretrain, lora_rank=args.lora_rank)
|
||||
elif args.model == "gpt2":
|
||||
model = GPTRM(pretrained=args.pretrain, lora_rank=args.lora_rank)
|
||||
elif args.model == "llama":
|
||||
model = LlamaRM(pretrained=args.pretrain, lora_rank=args.lora_rank)
|
||||
else:
|
||||
raise ValueError(f'Unsupported model "{args.model}"')
|
||||
|
||||
model.to(torch.bfloat16).to(torch.cuda.current_device())
|
||||
|
||||
if args.model_path is not None:
|
||||
state_dict = torch.load(args.model_path)
|
||||
model.load_state_dict(state_dict)
|
||||
|
||||
# configure tokenizer
|
||||
if args.model == "gpt2":
|
||||
tokenizer = GPT2Tokenizer.from_pretrained("gpt2" if args.tokenizer is None else args.tokenizer)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "bloom":
|
||||
tokenizer = BloomTokenizerFast.from_pretrained(
|
||||
"bigscience/bloom-560m" if args.tokenizer is None else args.tokenizer
|
||||
)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "opt":
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" if args.tokenizer is None else args.tokenizer)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "llama":
|
||||
tokenizer = LlamaTokenizer.from_pretrained(
|
||||
"hf-internal-testing/llama-tokenizer" if args.tokenizer is None else args.tokenizer
|
||||
)
|
||||
tokenizer.eos_token = "</s>"
|
||||
tokenizer.pad_token = tokenizer.unk_token
|
||||
else:
|
||||
raise ValueError(f'Unsupported model "{args.model}"')
|
||||
|
||||
# configure optimizer
|
||||
if args.strategy.startswith("colossalai"):
|
||||
optim = HybridAdam(model.parameters(), lr=args.lr)
|
||||
else:
|
||||
optim = Adam(model.parameters(), lr=args.lr)
|
||||
|
||||
# configure loss function
|
||||
if args.loss_fn == "log_sig":
|
||||
loss_fn = LogSigLoss()
|
||||
elif args.loss_fn == "log_exp":
|
||||
loss_fn = LogExpLoss()
|
||||
else:
|
||||
raise ValueError(f'Unsupported loss function "{args.loss_fn}"')
|
||||
|
||||
# prepare for data and dataset
|
||||
if args.subset is not None:
|
||||
data = load_dataset(args.dataset, data_dir=args.subset)
|
||||
else:
|
||||
data = load_dataset(args.dataset)
|
||||
|
||||
train_data = data["train"].select(range(min(args.max_datasets_size, len(data["train"]))))
|
||||
eval_data = data["test"].select(range(min(args.max_datasets_size, len(data["test"]))))
|
||||
|
||||
if args.dataset == "Dahoas/rm-static":
|
||||
train_dataset = RmStaticDataset(train_data, tokenizer, args.max_len)
|
||||
eval_dataset = RmStaticDataset(eval_data, tokenizer, args.max_len)
|
||||
elif args.dataset == "Anthropic/hh-rlhf":
|
||||
train_dataset = HhRlhfDataset(train_data, tokenizer, args.max_len)
|
||||
eval_dataset = HhRlhfDataset(eval_data, tokenizer, args.max_len)
|
||||
else:
|
||||
raise ValueError(f'Unsupported dataset "{args.dataset}"')
|
||||
|
||||
if dist.is_initialized() and dist.get_world_size() > 1:
|
||||
train_sampler = DistributedSampler(
|
||||
train_dataset,
|
||||
shuffle=True,
|
||||
seed=42,
|
||||
drop_last=True,
|
||||
rank=dist.get_rank(),
|
||||
num_replicas=dist.get_world_size(),
|
||||
)
|
||||
eval_sampler = DistributedSampler(
|
||||
eval_dataset,
|
||||
shuffle=True,
|
||||
seed=42,
|
||||
drop_last=True,
|
||||
rank=dist.get_rank(),
|
||||
num_replicas=dist.get_world_size(),
|
||||
)
|
||||
else:
|
||||
train_sampler = None
|
||||
eval_sampler = None
|
||||
|
||||
train_dataloader = DataLoader(
|
||||
train_dataset,
|
||||
shuffle=(train_sampler is None),
|
||||
sampler=train_sampler,
|
||||
batch_size=args.batch_size,
|
||||
pin_memory=True,
|
||||
)
|
||||
|
||||
eval_dataloader = DataLoader(
|
||||
eval_dataset, shuffle=(eval_sampler is None), sampler=eval_sampler, batch_size=args.batch_size, pin_memory=True
|
||||
)
|
||||
|
||||
lr_scheduler = CosineAnnealingLR(optim, train_dataloader.__len__() // 100)
|
||||
strategy_dict = strategy.prepare(dict(model=model, optimizer=optim, lr_scheduler=lr_scheduler))
|
||||
model = strategy_dict["model"]
|
||||
optim = strategy_dict["optimizer"]
|
||||
lr_scheduler = strategy_dict["lr_scheduler"]
|
||||
trainer = RewardModelTrainer(
|
||||
model=model,
|
||||
strategy=strategy,
|
||||
optim=optim,
|
||||
lr_scheduler=lr_scheduler,
|
||||
loss_fn=loss_fn,
|
||||
max_epochs=args.max_epochs,
|
||||
)
|
||||
|
||||
trainer.fit(
|
||||
train_dataloader=train_dataloader,
|
||||
eval_dataloader=eval_dataloader,
|
||||
log_dir=args.log_dir,
|
||||
use_wandb=args.use_wandb,
|
||||
)
|
||||
|
||||
if args.lora_rank > 0 and args.merge_lora_weights:
|
||||
from coati.models.lora import LORA_MANAGER
|
||||
|
||||
# NOTE: set model to eval to merge LoRA weights
|
||||
LORA_MANAGER.merge_weights = True
|
||||
model.eval()
|
||||
# save model checkpoint after fitting on only rank0
|
||||
state_dict = model.state_dict()
|
||||
torch.save(state_dict, args.save_path)
|
||||
# save optimizer checkpoint on all ranks
|
||||
if args.need_optim_ckpt:
|
||||
strategy.save_optimizer(
|
||||
trainer.optimizer, "rm_optim_checkpoint_%d.pt" % (torch.cuda.current_device()), only_rank0=False
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--strategy", choices=["ddp", "colossalai_gemini", "colossalai_zero2"], default="colossalai_zero2"
|
||||
)
|
||||
parser.add_argument("--model", choices=["gpt2", "bloom", "opt", "llama"], default="bloom")
|
||||
parser.add_argument("--tokenizer", type=str, default=None)
|
||||
parser.add_argument("--pretrain", type=str, default=None)
|
||||
parser.add_argument("--model_path", type=str, default=None)
|
||||
parser.add_argument("--need_optim_ckpt", type=bool, default=False)
|
||||
parser.add_argument(
|
||||
"--dataset", type=str, choices=["Anthropic/hh-rlhf", "Dahoas/rm-static"], default="Dahoas/rm-static"
|
||||
)
|
||||
parser.add_argument("--subset", type=lambda x: None if x == "None" else x, default=None)
|
||||
parser.add_argument("--max_datasets_size", type=int, default=1000000)
|
||||
parser.add_argument("--save_path", type=str, default="rm_ckpt")
|
||||
parser.add_argument("--max_epochs", type=int, default=1)
|
||||
parser.add_argument("--batch_size", type=int, default=1)
|
||||
parser.add_argument("--max_len", type=int, default=512)
|
||||
parser.add_argument("--lora_rank", type=int, default=0, help="low-rank adaptation matrices rank")
|
||||
parser.add_argument("--merge_lora_weights", type=bool, default=True)
|
||||
parser.add_argument("--lr", type=float, default=9e-6)
|
||||
parser.add_argument("--loss_fn", type=str, default="log_sig", choices=["log_sig", "log_exp"])
|
||||
parser.add_argument("--log_dir", default="logs", type=str)
|
||||
parser.add_argument("--use_wandb", default=False, action="store_true")
|
||||
args = parser.parse_args()
|
||||
train(args)
|
|
@ -1,25 +0,0 @@
|
|||
set_n_least_used_CUDA_VISIBLE_DEVICES() {
|
||||
local n=${1:-"9999"}
|
||||
echo "GPU Memory Usage:"
|
||||
local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv |
|
||||
tail -n +2 |
|
||||
nl -v 0 |
|
||||
tee /dev/tty |
|
||||
sort -g -k 2 |
|
||||
awk '{print $1}' |
|
||||
head -n $n)
|
||||
export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g')
|
||||
echo "Now CUDA_VISIBLE_DEVICES is set to:"
|
||||
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
|
||||
}
|
||||
|
||||
set_n_least_used_CUDA_VISIBLE_DEVICES 2
|
||||
|
||||
torchrun --standalone --nproc_per_node=2 train_reward_model.py \
|
||||
--pretrain 'gpt2' \
|
||||
--model 'gpt2' \
|
||||
--strategy colossalai_zero2 \
|
||||
--loss_fn 'log_exp' \
|
||||
--dataset 'Anthropic/hh-rlhf' \
|
||||
--batch_size 16 \
|
||||
--max_epochs 10
|
|
@ -1,221 +0,0 @@
|
|||
import argparse
|
||||
import math
|
||||
import warnings
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from coati.dataset import SFTDataset, SupervisedDataset
|
||||
from coati.models.bloom import BLOOMActor
|
||||
from coati.models.chatglm import ChatGLMActor
|
||||
from coati.models.chatglm.chatglm_tokenizer import ChatGLMTokenizer
|
||||
from coati.models.gpt import GPTActor
|
||||
from coati.models.llama import LlamaActor
|
||||
from coati.models.opt import OPTActor
|
||||
from coati.trainer import SFTTrainer
|
||||
from coati.trainer.strategies import DDPStrategy, GeminiStrategy, LowLevelZeroStrategy
|
||||
from datasets import load_dataset
|
||||
from torch.optim import Adam
|
||||
from torch.utils.data import DataLoader
|
||||
from torch.utils.data.distributed import DistributedSampler
|
||||
from transformers import AutoTokenizer, BloomTokenizerFast, LlamaTokenizer
|
||||
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
|
||||
from transformers.trainer import get_scheduler
|
||||
|
||||
from colossalai.logging import get_dist_logger
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
|
||||
|
||||
def train(args):
|
||||
# configure strategy
|
||||
if args.strategy == "ddp":
|
||||
strategy = DDPStrategy()
|
||||
elif args.strategy == "colossalai_gemini":
|
||||
strategy = GeminiStrategy(placement_policy="auto")
|
||||
elif args.strategy == "colossalai_zero2":
|
||||
strategy = LowLevelZeroStrategy(stage=2, placement_policy="cuda")
|
||||
elif args.strategy == "colossalai_zero2_cpu":
|
||||
strategy = LowLevelZeroStrategy(stage=2, placement_policy="cpu")
|
||||
else:
|
||||
raise ValueError(f'Unsupported strategy "{args.strategy}"')
|
||||
|
||||
# configure model
|
||||
if args.lora_rank > 0:
|
||||
warnings.warn("Lora is not supported yet.")
|
||||
args.lora_rank = 0
|
||||
|
||||
with strategy.model_init_context():
|
||||
if args.model == "bloom":
|
||||
model = BLOOMActor(pretrained=args.pretrain, lora_rank=args.lora_rank, checkpoint=args.grad_checkpoint)
|
||||
elif args.model == "opt":
|
||||
model = OPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank, checkpoint=args.grad_checkpoint)
|
||||
elif args.model == "gpt2":
|
||||
model = GPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank, checkpoint=args.grad_checkpoint)
|
||||
elif args.model == "llama":
|
||||
model = LlamaActor(pretrained=args.pretrain, lora_rank=args.lora_rank, checkpoint=args.grad_checkpoint)
|
||||
elif args.model == "chatglm":
|
||||
model = ChatGLMActor(pretrained=args.pretrain)
|
||||
else:
|
||||
raise ValueError(f'Unsupported model "{args.model}"')
|
||||
|
||||
model.to(torch.bfloat16).to(torch.cuda.current_device())
|
||||
|
||||
# configure tokenizer
|
||||
if args.model == "gpt2":
|
||||
tokenizer = GPT2Tokenizer.from_pretrained("gpt2" if args.tokenizer is None else args.tokenizer)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "bloom":
|
||||
tokenizer = BloomTokenizerFast.from_pretrained(
|
||||
"bigscience/bloom-560m" if args.tokenizer is None else args.tokenizer
|
||||
)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "opt":
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" if args.tokenizer is None else args.tokenizer)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif args.model == "llama":
|
||||
tokenizer = LlamaTokenizer.from_pretrained(
|
||||
"hf-internal-testing/llama-tokenizer" if args.tokenizer is None else args.tokenizer
|
||||
)
|
||||
tokenizer.eos_token = "</s>"
|
||||
tokenizer.pad_token = tokenizer.unk_token
|
||||
elif args.model == "chatglm":
|
||||
tokenizer = ChatGLMTokenizer.from_pretrained(
|
||||
"THUDM/chatglm-6b" if args.tokenizer is None else args.tokenizer, trust_remote_code=True
|
||||
)
|
||||
else:
|
||||
raise ValueError(f'Unsupported model "{args.model}"')
|
||||
|
||||
# configure optimizer
|
||||
if args.strategy.startswith("colossalai"):
|
||||
optim = HybridAdam(model.parameters(), lr=args.lr, clipping_norm=1.0)
|
||||
else:
|
||||
optim = Adam(model.parameters(), lr=args.lr)
|
||||
|
||||
# configure dataset
|
||||
if args.dataset == "yizhongw/self_instruct":
|
||||
train_data = load_dataset(args.dataset, "super_natural_instructions", split="train")
|
||||
eval_data = load_dataset(args.dataset, "super_natural_instructions", split="test")
|
||||
|
||||
if args.max_datasets_size is not None:
|
||||
train_data = train_data.select(range(min(args.max_datasets_size, len(train_data))))
|
||||
eval_data = eval_data.select(range(min(args.max_datasets_size, len(eval_data))))
|
||||
|
||||
train_dataset = SFTDataset(train_data, tokenizer, args.max_len)
|
||||
eval_dataset = SFTDataset(eval_data, tokenizer, args.max_len)
|
||||
|
||||
else:
|
||||
train_dataset = SupervisedDataset(
|
||||
tokenizer=tokenizer,
|
||||
data_path=args.dataset,
|
||||
max_datasets_size=args.max_datasets_size,
|
||||
max_length=args.max_len,
|
||||
)
|
||||
eval_dataset = None
|
||||
|
||||
if dist.is_initialized() and dist.get_world_size() > 1:
|
||||
train_sampler = DistributedSampler(
|
||||
train_dataset,
|
||||
shuffle=True,
|
||||
seed=42,
|
||||
drop_last=True,
|
||||
rank=dist.get_rank(),
|
||||
num_replicas=dist.get_world_size(),
|
||||
)
|
||||
if eval_dataset is not None:
|
||||
eval_sampler = DistributedSampler(
|
||||
eval_dataset,
|
||||
shuffle=False,
|
||||
seed=42,
|
||||
drop_last=False,
|
||||
rank=dist.get_rank(),
|
||||
num_replicas=dist.get_world_size(),
|
||||
)
|
||||
else:
|
||||
train_sampler = None
|
||||
eval_sampler = None
|
||||
|
||||
train_dataloader = DataLoader(
|
||||
train_dataset,
|
||||
shuffle=(train_sampler is None),
|
||||
sampler=train_sampler,
|
||||
batch_size=args.batch_size,
|
||||
pin_memory=True,
|
||||
)
|
||||
if eval_dataset is not None:
|
||||
eval_dataloader = DataLoader(
|
||||
eval_dataset,
|
||||
shuffle=(eval_sampler is None),
|
||||
sampler=eval_sampler,
|
||||
batch_size=args.batch_size,
|
||||
pin_memory=True,
|
||||
)
|
||||
else:
|
||||
eval_dataloader = None
|
||||
|
||||
num_update_steps_per_epoch = len(train_dataloader) // args.accumulation_steps
|
||||
max_steps = math.ceil(args.max_epochs * num_update_steps_per_epoch)
|
||||
lr_scheduler = get_scheduler(
|
||||
"cosine", optim, num_warmup_steps=math.ceil(max_steps * 0.03), num_training_steps=max_steps
|
||||
)
|
||||
strategy_dict = strategy.prepare(dict(model=model, optimizer=optim, lr_scheduler=lr_scheduler))
|
||||
model = strategy_dict["model"]
|
||||
optim = strategy_dict["optimizer"]
|
||||
lr_scheduler = strategy_dict["lr_scheduler"]
|
||||
trainer = SFTTrainer(
|
||||
model=model,
|
||||
strategy=strategy,
|
||||
optim=optim,
|
||||
lr_scheduler=lr_scheduler,
|
||||
max_epochs=args.max_epochs,
|
||||
accumulation_steps=args.accumulation_steps,
|
||||
)
|
||||
|
||||
logger = get_dist_logger()
|
||||
trainer.fit(
|
||||
train_dataloader=train_dataloader,
|
||||
eval_dataloader=eval_dataloader,
|
||||
logger=logger,
|
||||
log_dir=args.log_dir,
|
||||
use_wandb=args.use_wandb,
|
||||
)
|
||||
|
||||
if args.lora_rank > 0 and args.merge_lora_weights:
|
||||
from coati.models.lora import LORA_MANAGER
|
||||
|
||||
# NOTE: set model to eval to merge LoRA weights
|
||||
LORA_MANAGER.merge_weights = True
|
||||
model.eval()
|
||||
# save model checkpoint after fitting on only rank0
|
||||
strategy.save_pretrained(model, path=args.save_path, tokenizer=tokenizer)
|
||||
# save optimizer checkpoint on all ranks
|
||||
if args.need_optim_ckpt:
|
||||
strategy.save_optimizer(
|
||||
trainer.optimizer, "rm_optim_checkpoint_%d.pt" % (torch.cuda.current_device()), only_rank0=False
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--strategy",
|
||||
choices=["ddp", "colossalai_gemini", "colossalai_zero2", "colossalai_zero2_cpu"],
|
||||
default="colossalai_zero2",
|
||||
)
|
||||
parser.add_argument("--model", choices=["gpt2", "bloom", "opt", "llama", "chatglm"], default="bloom")
|
||||
parser.add_argument("--tokenizer", type=str, default=None)
|
||||
parser.add_argument("--pretrain", type=str, default=None)
|
||||
parser.add_argument("--dataset", type=str, default=None)
|
||||
parser.add_argument("--max_datasets_size", type=int, default=None)
|
||||
parser.add_argument("--save_path", type=str, default="output")
|
||||
parser.add_argument("--need_optim_ckpt", type=bool, default=False)
|
||||
parser.add_argument("--max_epochs", type=int, default=3)
|
||||
parser.add_argument("--batch_size", type=int, default=4)
|
||||
parser.add_argument("--max_len", type=int, default=512)
|
||||
parser.add_argument("--lora_rank", type=int, default=0, help="low-rank adaptation matrices rank")
|
||||
parser.add_argument("--merge_lora_weights", type=bool, default=True)
|
||||
parser.add_argument("--lr", type=float, default=5e-6)
|
||||
parser.add_argument("--accumulation_steps", type=int, default=8)
|
||||
parser.add_argument("--log_dir", default="logs", type=str)
|
||||
parser.add_argument("--use_wandb", default=False, action="store_true")
|
||||
parser.add_argument("--grad_checkpoint", default=False, action="store_true")
|
||||
args = parser.parse_args()
|
||||
train(args)
|
|
@ -1,28 +0,0 @@
|
|||
set_n_least_used_CUDA_VISIBLE_DEVICES() {
|
||||
local n=${1:-"9999"}
|
||||
echo "GPU Memory Usage:"
|
||||
local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv |
|
||||
tail -n +2 |
|
||||
nl -v 0 |
|
||||
tee /dev/tty |
|
||||
sort -g -k 2 |
|
||||
awk '{print $1}' |
|
||||
head -n $n)
|
||||
export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g')
|
||||
echo "Now CUDA_VISIBLE_DEVICES is set to:"
|
||||
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
|
||||
}
|
||||
|
||||
set_n_least_used_CUDA_VISIBLE_DEVICES 4
|
||||
|
||||
torchrun --standalone --nproc_per_node=4 train_sft.py \
|
||||
--pretrain "/path/to/LLaMa-7B/" \
|
||||
--model 'llama' \
|
||||
--strategy colossalai_zero2 \
|
||||
--save_path /path/to/Coati-7B \
|
||||
--dataset /path/to/data.json \
|
||||
--batch_size 4 \
|
||||
--accumulation_steps 8 \
|
||||
--lr 2e-5 \
|
||||
--max_datasets_size 512 \
|
||||
--max_epochs 1
|
|
@ -1,141 +0,0 @@
|
|||
# Adapted from https://github.com/tloen/alpaca-lora/blob/main/generate.py
|
||||
|
||||
import argparse
|
||||
from time import time
|
||||
|
||||
import torch
|
||||
from coati.quant import llama_load_quant, low_resource_init
|
||||
from transformers import AutoTokenizer, GenerationConfig, LlamaConfig, LlamaForCausalLM
|
||||
|
||||
|
||||
def generate_prompt(instruction, input=None):
|
||||
if input:
|
||||
return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
|
||||
|
||||
### Instruction:
|
||||
{instruction}
|
||||
|
||||
### Input:
|
||||
{input}
|
||||
|
||||
### Response:"""
|
||||
else:
|
||||
return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
|
||||
|
||||
### Instruction:
|
||||
{instruction}
|
||||
|
||||
### Response:"""
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
def evaluate(
|
||||
model,
|
||||
tokenizer,
|
||||
instruction,
|
||||
input=None,
|
||||
temperature=0.1,
|
||||
top_p=0.75,
|
||||
top_k=40,
|
||||
num_beams=4,
|
||||
max_new_tokens=128,
|
||||
**kwargs,
|
||||
):
|
||||
prompt = generate_prompt(instruction, input)
|
||||
inputs = tokenizer(prompt, return_tensors="pt")
|
||||
input_ids = inputs["input_ids"].cuda()
|
||||
generation_config = GenerationConfig(
|
||||
temperature=temperature,
|
||||
top_p=top_p,
|
||||
top_k=top_k,
|
||||
num_beams=num_beams,
|
||||
**kwargs,
|
||||
)
|
||||
generation_output = model.generate(
|
||||
input_ids=input_ids,
|
||||
generation_config=generation_config,
|
||||
return_dict_in_generate=True,
|
||||
output_scores=True,
|
||||
max_new_tokens=max_new_tokens,
|
||||
do_sample=True,
|
||||
)
|
||||
s = generation_output.sequences[0]
|
||||
output = tokenizer.decode(s)
|
||||
n_new_tokens = s.size(0) - input_ids.size(1)
|
||||
return output.split("### Response:")[1].strip(), n_new_tokens
|
||||
|
||||
|
||||
instructions = [
|
||||
"Tell me about alpacas.",
|
||||
"Tell me about the president of Mexico in 2019.",
|
||||
"Tell me about the king of France in 2019.",
|
||||
"List all Canadian provinces in alphabetical order.",
|
||||
"Write a Python program that prints the first 10 Fibonacci numbers.",
|
||||
"Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. For numbers which are multiples of both three and five print 'FizzBuzz'.",
|
||||
"Tell me five words that rhyme with 'shock'.",
|
||||
"Translate the sentence 'I have no mouth but I must scream' into Spanish.",
|
||||
"Count up from 1 to 500.",
|
||||
# ===
|
||||
"How to play support in legends of league",
|
||||
"Write a Python program that calculate Fibonacci numbers.",
|
||||
]
|
||||
inst = [instructions[0]] * 4
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"pretrained",
|
||||
help="Path to pretrained model. Can be a local path or a model name from the HuggingFace model hub.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--quant",
|
||||
choices=["8bit", "4bit"],
|
||||
default=None,
|
||||
help="Quantization mode. Default: None (no quantization, fp16).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--gptq_checkpoint",
|
||||
default=None,
|
||||
help="Path to GPTQ checkpoint. This is only useful when quantization mode is 4bit. Default: None.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--gptq_group_size",
|
||||
type=int,
|
||||
default=128,
|
||||
help="Group size for GPTQ. This is only useful when quantization mode is 4bit. Default: 128.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.quant == "4bit":
|
||||
assert args.gptq_checkpoint is not None, "Please specify a GPTQ checkpoint."
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(args.pretrained)
|
||||
|
||||
if args.quant == "4bit":
|
||||
with low_resource_init():
|
||||
config = LlamaConfig.from_pretrained(args.pretrained)
|
||||
model = LlamaForCausalLM(config)
|
||||
model = llama_load_quant(model, args.gptq_checkpoint, 4, args.gptq_group_size)
|
||||
model.cuda()
|
||||
else:
|
||||
model = LlamaForCausalLM.from_pretrained(
|
||||
args.pretrained,
|
||||
load_in_8bit=(args.quant == "8bit"),
|
||||
torch_dtype=torch.float16,
|
||||
device_map="auto",
|
||||
)
|
||||
if args.quant != "8bit":
|
||||
model.half() # seems to fix bugs for some users.
|
||||
model.eval()
|
||||
|
||||
total_tokens = 0
|
||||
start = time()
|
||||
for instruction in instructions:
|
||||
print(f"Instruction: {instruction}")
|
||||
resp, tokens = evaluate(model, tokenizer, instruction, temperature=0.2, num_beams=1)
|
||||
total_tokens += tokens
|
||||
print(f"Response: {resp}")
|
||||
print("\n----------------------------\n")
|
||||
duration = time() - start
|
||||
print(f"Total time: {duration:.3f} s, {total_tokens/duration:.3f} tokens/s")
|
||||
print(f"Peak CUDA mem: {torch.cuda.max_memory_allocated()/1024**3:.3f} GB")
|
|
@ -1,61 +0,0 @@
|
|||
import os
|
||||
|
||||
from transformers import AutoTokenizer
|
||||
from utils import ChatPromptProcessor, Dialogue
|
||||
|
||||
CONTEXT = "Below is an instruction that describes a task. Write a response that appropriately completes the request. Do not generate new instructions."
|
||||
tokenizer = AutoTokenizer.from_pretrained(os.environ["PRETRAINED_PATH"])
|
||||
|
||||
samples = [
|
||||
(
|
||||
[
|
||||
Dialogue(
|
||||
instruction="Who is the best player in the history of NBA?",
|
||||
response="The best player in the history of the NBA is widely considered to be Michael Jordan. He is one of the most successful players in the league, having won 6 NBA championships with the Chicago Bulls and 5 more with the Washington Wizards. He is a 5-time MVP, 1",
|
||||
),
|
||||
Dialogue(instruction="continue this talk", response=""),
|
||||
],
|
||||
128,
|
||||
"Below is an instruction that describes a task. Write a response that appropriately completes the request. Do not generate new instructions.\n\n### Instruction:\nWho is the best player in the history of NBA?\n\n### Response:\nThe best player in the history of the NBA is widely considered to be Michael Jordan. He is one of the most successful players in the league, having won 6 NBA championships with the Chicago Bulls and 5 more with the Washington Wizards. He is a 5-time MVP, 1\n\n### Instruction:\ncontinue this talk\n\n### Response:\n",
|
||||
),
|
||||
(
|
||||
[
|
||||
Dialogue(
|
||||
instruction="Who is the best player in the history of NBA?",
|
||||
response="The best player in the history of the NBA is widely considered to be Michael Jordan. He is one of the most successful players in the league, having won 6 NBA championships with the Chicago Bulls and 5 more with the Washington Wizards. He is a 5-time MVP, 1",
|
||||
),
|
||||
Dialogue(instruction="continue this talk", response=""),
|
||||
],
|
||||
200,
|
||||
"Below is an instruction that describes a task. Write a response that appropriately completes the request. Do not generate new instructions.\n\n### Instruction:\ncontinue this talk\n\n### Response:\n",
|
||||
),
|
||||
(
|
||||
[
|
||||
Dialogue(
|
||||
instruction="Who is the best player in the history of NBA?",
|
||||
response="The best player in the history of the NBA is widely considered to be Michael Jordan. He is one of the most successful players in the league, having won 6 NBA championships with the Chicago Bulls and 5 more with the Washington Wizards. He is a 5-time MVP, 1",
|
||||
),
|
||||
Dialogue(instruction="continue this talk", response=""),
|
||||
],
|
||||
211,
|
||||
"Below is an instruction that describes a task. Write a response that appropriately completes the request. Do not generate new instructions.\n\n### Instruction:\ncontinue this\n\n### Response:\n",
|
||||
),
|
||||
(
|
||||
[
|
||||
Dialogue(instruction="Who is the best player in the history of NBA?", response=""),
|
||||
],
|
||||
128,
|
||||
"Below is an instruction that describes a task. Write a response that appropriately completes the request. Do not generate new instructions.\n\n### Instruction:\nWho is the best player in the history of NBA?\n\n### Response:\n",
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def test_chat_prompt_processor():
|
||||
processor = ChatPromptProcessor(tokenizer, CONTEXT, 256)
|
||||
for history, max_new_tokens, result in samples:
|
||||
prompt = processor.preprocess_prompt(history, max_new_tokens)
|
||||
assert prompt == result
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_chat_prompt_processor()
|
|
@ -1,209 +0,0 @@
|
|||
import json
|
||||
import re
|
||||
from threading import Lock
|
||||
from typing import Any, Callable, Generator, List, Optional
|
||||
|
||||
import jieba
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import torch.nn as nn
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
try:
|
||||
from transformers.generation_logits_process import (
|
||||
LogitsProcessorList,
|
||||
TemperatureLogitsWarper,
|
||||
TopKLogitsWarper,
|
||||
TopPLogitsWarper,
|
||||
)
|
||||
except ImportError:
|
||||
from transformers.generation import LogitsProcessorList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper
|
||||
|
||||
|
||||
def prepare_logits_processor(
|
||||
top_k: Optional[int] = None, top_p: Optional[float] = None, temperature: Optional[float] = None
|
||||
) -> LogitsProcessorList:
|
||||
processor_list = LogitsProcessorList()
|
||||
if temperature is not None and temperature != 1.0:
|
||||
processor_list.append(TemperatureLogitsWarper(temperature))
|
||||
if top_k is not None and top_k != 0:
|
||||
processor_list.append(TopKLogitsWarper(top_k))
|
||||
if top_p is not None and top_p < 1.0:
|
||||
processor_list.append(TopPLogitsWarper(top_p))
|
||||
return processor_list
|
||||
|
||||
|
||||
def _is_sequence_finished(unfinished_sequences: torch.Tensor) -> bool:
|
||||
if dist.is_initialized() and dist.get_world_size() > 1:
|
||||
# consider DP
|
||||
unfinished_sequences = unfinished_sequences.clone()
|
||||
dist.all_reduce(unfinished_sequences)
|
||||
return unfinished_sequences.max() == 0
|
||||
|
||||
|
||||
def sample_streamingly(
|
||||
model: nn.Module,
|
||||
input_ids: torch.Tensor,
|
||||
max_generate_tokens: int,
|
||||
early_stopping: bool = False,
|
||||
eos_token_id: Optional[int] = None,
|
||||
pad_token_id: Optional[int] = None,
|
||||
top_k: Optional[int] = None,
|
||||
top_p: Optional[float] = None,
|
||||
temperature: Optional[float] = None,
|
||||
prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None,
|
||||
update_model_kwargs_fn: Optional[Callable[[dict, Any], dict]] = None,
|
||||
**model_kwargs,
|
||||
) -> Generator:
|
||||
logits_processor = prepare_logits_processor(top_k, top_p, temperature)
|
||||
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
|
||||
|
||||
for _ in range(max_generate_tokens):
|
||||
model_inputs = (
|
||||
prepare_inputs_fn(input_ids, **model_kwargs) if prepare_inputs_fn is not None else {"input_ids": input_ids}
|
||||
)
|
||||
outputs = model(**model_inputs)
|
||||
|
||||
next_token_logits = outputs["logits"][:, -1, :]
|
||||
# pre-process distribution
|
||||
next_token_logits = logits_processor(input_ids, next_token_logits)
|
||||
# sample
|
||||
probs = torch.softmax(next_token_logits, dim=-1, dtype=torch.float)
|
||||
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
|
||||
|
||||
# finished sentences should have their next token be a padding token
|
||||
if eos_token_id is not None:
|
||||
if pad_token_id is None:
|
||||
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
|
||||
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
|
||||
|
||||
yield next_tokens
|
||||
|
||||
# update generated ids, model inputs for next step
|
||||
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
|
||||
if update_model_kwargs_fn is not None:
|
||||
model_kwargs = update_model_kwargs_fn(outputs, **model_kwargs)
|
||||
|
||||
# if eos_token was found in one sentence, set sentence to finished
|
||||
if eos_token_id is not None:
|
||||
unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long())
|
||||
|
||||
# stop when each sentence is finished if early_stopping=True
|
||||
if early_stopping and _is_sequence_finished(unfinished_sequences):
|
||||
break
|
||||
|
||||
|
||||
def update_model_kwargs_fn(outputs: dict, **model_kwargs) -> dict:
|
||||
if "past_key_values" in outputs:
|
||||
model_kwargs["past"] = outputs["past_key_values"]
|
||||
else:
|
||||
model_kwargs["past"] = None
|
||||
|
||||
# update token_type_ids with last value
|
||||
if "token_type_ids" in model_kwargs:
|
||||
token_type_ids = model_kwargs["token_type_ids"]
|
||||
model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
|
||||
|
||||
# update attention mask
|
||||
if "attention_mask" in model_kwargs:
|
||||
attention_mask = model_kwargs["attention_mask"]
|
||||
model_kwargs["attention_mask"] = torch.cat(
|
||||
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
|
||||
)
|
||||
|
||||
return model_kwargs
|
||||
|
||||
|
||||
class Dialogue(BaseModel):
|
||||
instruction: str = Field(min_length=1, example="Count up from 1 to 500.")
|
||||
response: str = Field(example="")
|
||||
|
||||
|
||||
def _format_dialogue(instruction: str, response: str = ""):
|
||||
return f"\n\n### Instruction:\n{instruction}\n\n### Response:\n{response}"
|
||||
|
||||
|
||||
STOP_PAT = re.compile(r"(###|instruction:).*", flags=(re.I | re.S))
|
||||
|
||||
|
||||
class ChatPromptProcessor:
|
||||
SAFE_RESPONSE = "The input/response contains inappropriate content, please rephrase your prompt."
|
||||
|
||||
def __init__(self, tokenizer, context: str, max_len: int = 2048, censored_words: List[str] = []):
|
||||
self.tokenizer = tokenizer
|
||||
self.context = context
|
||||
self.max_len = max_len
|
||||
self.censored_words = set([word.lower() for word in censored_words])
|
||||
# These will be initialized after the first call of preprocess_prompt()
|
||||
self.context_len: Optional[int] = None
|
||||
self.dialogue_placeholder_len: Optional[int] = None
|
||||
|
||||
def preprocess_prompt(self, history: List[Dialogue], max_new_tokens: int) -> str:
|
||||
if self.context_len is None:
|
||||
self.context_len = len(self.tokenizer(self.context)["input_ids"])
|
||||
if self.dialogue_placeholder_len is None:
|
||||
self.dialogue_placeholder_len = len(
|
||||
self.tokenizer(_format_dialogue(""), add_special_tokens=False)["input_ids"]
|
||||
)
|
||||
prompt = self.context
|
||||
# the last dialogue must be in the prompt
|
||||
last_dialogue = history.pop()
|
||||
# the response of the last dialogue is empty
|
||||
assert last_dialogue.response == ""
|
||||
if (
|
||||
len(self.tokenizer(_format_dialogue(last_dialogue.instruction), add_special_tokens=False)["input_ids"])
|
||||
+ max_new_tokens
|
||||
+ self.context_len
|
||||
>= self.max_len
|
||||
):
|
||||
# to avoid truncate placeholder, apply truncate to the original instruction
|
||||
instruction_truncated = self.tokenizer(
|
||||
last_dialogue.instruction,
|
||||
add_special_tokens=False,
|
||||
truncation=True,
|
||||
max_length=(self.max_len - max_new_tokens - self.context_len - self.dialogue_placeholder_len),
|
||||
)["input_ids"]
|
||||
instruction_truncated = self.tokenizer.decode(instruction_truncated).lstrip()
|
||||
prompt += _format_dialogue(instruction_truncated)
|
||||
return prompt
|
||||
|
||||
res_len = self.max_len - max_new_tokens - len(self.tokenizer(prompt)["input_ids"])
|
||||
|
||||
rows = []
|
||||
for dialogue in history[::-1]:
|
||||
text = _format_dialogue(dialogue.instruction, dialogue.response)
|
||||
cur_len = len(self.tokenizer(text, add_special_tokens=False)["input_ids"])
|
||||
if res_len - cur_len < 0:
|
||||
break
|
||||
res_len -= cur_len
|
||||
rows.insert(0, text)
|
||||
prompt += "".join(rows) + _format_dialogue(last_dialogue.instruction)
|
||||
return prompt
|
||||
|
||||
def postprocess_output(self, output: str) -> str:
|
||||
output = STOP_PAT.sub("", output)
|
||||
return output.strip()
|
||||
|
||||
def has_censored_words(self, text: str) -> bool:
|
||||
if len(self.censored_words) == 0:
|
||||
return False
|
||||
intersection = set(jieba.cut(text.lower())) & self.censored_words
|
||||
return len(intersection) > 0
|
||||
|
||||
|
||||
class LockedIterator:
|
||||
def __init__(self, it, lock: Lock) -> None:
|
||||
self.lock = lock
|
||||
self.it = iter(it)
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
with self.lock:
|
||||
return next(self.it)
|
||||
|
||||
|
||||
def load_json(path: str):
|
||||
with open(path) as f:
|
||||
return json.load(f)
|
|
@ -1,2 +0,0 @@
|
|||
pytest
|
||||
colossalai==0.3.3
|
|
@ -1,14 +0,0 @@
|
|||
transformers>=4.20.1
|
||||
tqdm
|
||||
datasets
|
||||
loralib
|
||||
colossalai==0.3.3
|
||||
torch<2.0.0, >=1.12.1
|
||||
langchain
|
||||
tokenizers
|
||||
fastapi
|
||||
sse_starlette
|
||||
wandb
|
||||
sentencepiece
|
||||
gpustat
|
||||
tensorboard
|
|
@ -1,33 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -xue
|
||||
|
||||
echo "Hint: You can run this script with 'verbose' as the first argument to run all strategies."
|
||||
|
||||
if [[ $# -ne 0 && "$1" == "verbose" ]]; then
|
||||
STRATEGIES=(
|
||||
'ddp'
|
||||
'colossalai_gemini'
|
||||
'colossalai_gemini_cpu'
|
||||
'colossalai_zero2'
|
||||
'colossalai_zero2_cpu'
|
||||
'colossalai_zero1'
|
||||
'colossalai_zero1_cpu'
|
||||
)
|
||||
else
|
||||
STRATEGIES=(
|
||||
'colossalai_zero2'
|
||||
)
|
||||
fi
|
||||
|
||||
BASE_DIR=$(dirname $(dirname $(realpath $BASH_SOURCE)))
|
||||
BENCHMARKS_DIR=$BASE_DIR/benchmarks
|
||||
|
||||
echo "[Test]: testing benchmarks ..."
|
||||
|
||||
for strategy in ${STRATEGIES[@]}; do
|
||||
torchrun --standalone --nproc_per_node 1 $BENCHMARKS_DIR/benchmark_opt_lora_dummy.py \
|
||||
--model 125m --critic_model 125m --strategy ${strategy} --lora_rank 4 \
|
||||
--num_episodes 2 --num_collect_steps 4 --num_update_steps 2 \
|
||||
--train_batch_size 2 --experience_batch_size 4
|
||||
done
|
|
@ -1,91 +0,0 @@
|
|||
import os
|
||||
import tempfile
|
||||
from contextlib import nullcontext
|
||||
|
||||
import pytest
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from coati.models.gpt import GPTActor
|
||||
from coati.models.utils import calc_action_log_probs
|
||||
from coati.trainer.strategies import DDPStrategy, GeminiStrategy, LowLevelZeroStrategy, Strategy
|
||||
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
|
||||
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.testing import rerun_if_address_is_in_use, spawn
|
||||
|
||||
GPT_CONFIG = GPT2Config(n_embd=128, n_layer=4, n_head=4)
|
||||
|
||||
|
||||
def get_data(batch_size: int, seq_len: int = 10) -> dict:
|
||||
input_ids = torch.randint(0, 50257, (batch_size, seq_len), device="cuda")
|
||||
attention_mask = torch.ones_like(input_ids)
|
||||
return dict(input_ids=input_ids, attention_mask=attention_mask)
|
||||
|
||||
|
||||
def train_step(strategy: Strategy, actor: GPTActor, actor_optim: HybridAdam, batch_size: int = 8):
|
||||
data = get_data(batch_size)
|
||||
action_mask = torch.ones_like(data["attention_mask"], dtype=torch.bool)
|
||||
actor_logits = actor(data["input_ids"], data["attention_mask"])["logits"]
|
||||
action_log_probs = calc_action_log_probs(actor_logits, data["input_ids"], action_mask.size(1))
|
||||
loss = action_log_probs.sum()
|
||||
strategy.backward(loss, actor, actor_optim)
|
||||
strategy.optimizer_step(actor_optim)
|
||||
|
||||
|
||||
def run_test_checkpoint(strategy_name: str, shard: bool):
|
||||
if strategy_name == "ddp":
|
||||
strategy = DDPStrategy()
|
||||
elif strategy_name == "colossalai_gemini":
|
||||
strategy = GeminiStrategy(placement_policy="auto", initial_scale=2**5)
|
||||
elif strategy_name == "colossalai_zero2":
|
||||
strategy = LowLevelZeroStrategy(stage=2, placement_policy="cuda")
|
||||
else:
|
||||
raise ValueError(f"Unsupported strategy '{strategy_name}'")
|
||||
|
||||
with strategy.model_init_context():
|
||||
actor = GPTActor(config=GPT_CONFIG).cuda()
|
||||
actor_optim = HybridAdam(actor.parameters())
|
||||
actor, actor_optim = strategy.prepare((actor, actor_optim))
|
||||
|
||||
train_step(strategy, actor, actor_optim)
|
||||
|
||||
ctx = tempfile.TemporaryDirectory() if dist.get_rank() == 0 else nullcontext()
|
||||
|
||||
with ctx as dirname:
|
||||
rank0_dirname = [dirname]
|
||||
dist.broadcast_object_list(rank0_dirname)
|
||||
rank0_dirname = rank0_dirname[0]
|
||||
|
||||
model_path = os.path.join(rank0_dirname, "model" if shard else f"model.pt")
|
||||
strategy.save_model(actor, model_path)
|
||||
optim_path = os.path.join(rank0_dirname, "optim" if shard else "optim.pt")
|
||||
strategy.save_optimizer(actor_optim, optim_path)
|
||||
dist.barrier()
|
||||
|
||||
strategy.load_model(actor, model_path, strict=False)
|
||||
strategy.load_optimizer(actor_optim, optim_path)
|
||||
dist.barrier()
|
||||
|
||||
train_step(strategy, actor, actor_optim)
|
||||
|
||||
|
||||
def run_dist(rank: int, world_size: int, port: int, strategy_name: str, shard: bool):
|
||||
os.environ["RANK"] = str(rank)
|
||||
os.environ["LOCAL_RANK"] = str(rank)
|
||||
os.environ["WORLD_SIZE"] = str(world_size)
|
||||
os.environ["MASTER_ADDR"] = "localhost"
|
||||
os.environ["MASTER_PORT"] = str(port)
|
||||
run_test_checkpoint(strategy_name, shard)
|
||||
|
||||
|
||||
@pytest.mark.dist
|
||||
@pytest.mark.parametrize("world_size", [4])
|
||||
@pytest.mark.parametrize("strategy_name", ["ddp", "colossalai_gemini", "colossalai_zero2"])
|
||||
@pytest.mark.parametrize("shard", [False, True])
|
||||
@rerun_if_address_is_in_use()
|
||||
def test_checkpoint(world_size: int, strategy_name: str, shard: bool):
|
||||
spawn(run_dist, world_size, strategy_name=strategy_name, shard=shard)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_checkpoint(2, "colossalai_gemini", shard=False)
|
|
@ -1,241 +0,0 @@
|
|||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
import torch
|
||||
from coati.dataset.prompt_dataset import PromptDataset
|
||||
from coati.dataset.reward_dataset import HhRlhfDataset, RmStaticDataset
|
||||
from coati.dataset.sft_dataset import IGNORE_INDEX, SFTDataset, SupervisedDataset
|
||||
from coati.models.chatglm.chatglm_tokenizer import ChatGLMTokenizer
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoTokenizer, BloomTokenizerFast, LlamaTokenizer, PreTrainedTokenizer
|
||||
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
|
||||
|
||||
SFT_DATASET = [
|
||||
{
|
||||
"instruction": "Provide a list of the top 10 most popular mobile games in Asia",
|
||||
"input": "",
|
||||
"output": "The top 10 most popular mobile games in Asia are:\n1) PUBG Mobile\n2) Pokemon Go\n3) Candy Crush Saga\n4) Free Fire\n5) Clash of Clans\n6) Mario Kart Tour\n7) Arena of Valor\n8) Fantasy Westward Journey\n9) Subway Surfers\n10) ARK Survival Evolved",
|
||||
"id": 0,
|
||||
},
|
||||
{
|
||||
"instruction": "Please provide an action plan for reducing carbon footprint on a corporate level",
|
||||
"input": "",
|
||||
"output": "An action plan for reducing carbon footprint on a corporate level could include switching to renewable energy sources (e.g., solar or wind powered), investing in energy efficient infrastructure, implementing green production practices (e.g., reduce water and paper usage, dispose of materials responsibly, recycle/reuse materials), reducing or eliminating travel, encouraging employees to be conscious of and reduce their own energy usage, and incentivizing employees to participate in sustainable work practices.",
|
||||
"id": 1,
|
||||
},
|
||||
{
|
||||
"instruction": "Write a persuasive email to your boss explaining why you should have a pay raise",
|
||||
"input": "",
|
||||
"output": "Dear [Boss], \n\nI hope this message finds you well. I am writing to request a pay raise.\n\nAs you know, I have been a dedicated and hardworking member of the team since I started working here [insert number] of months/years ago. My enthusiasm and passion for my job has remained consistent over the years, and I have always given 100% to my role. \n\nI understand that the current financial situation is challenging, however, I would sincerely appreciate you taking the time to consider my request. I believe that my dedication to the job and the value that I bring to the organization warrants a raise. I work diligently and am confident that I can continue to be an asset to the company. \n\nI hope my request is taken into account and I thank you in advance for your understanding. I look forward to our conversation. \n\nSincerely,\n[Your Name]",
|
||||
"id": 2,
|
||||
},
|
||||
]
|
||||
|
||||
PROMPT_DATASET = [
|
||||
{
|
||||
"instruction": 'Edit this paragraph to make it more concise: "Yesterday, I went to the store and bought some things. Then, I came home and put them away. After that, I went for a walk and met some friends."',
|
||||
"id": 0,
|
||||
},
|
||||
{"instruction": "Write a descriptive paragraph about a memorable vacation you went on", "id": 1},
|
||||
{"instruction": "Write a persuasive essay arguing why homework should be banned in schools", "id": 2},
|
||||
{"instruction": "Create a chart comparing the statistics on student debt in the United States.", "id": 3},
|
||||
]
|
||||
|
||||
|
||||
def make_tokenizer(model: str):
|
||||
if model == "gpt2":
|
||||
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif model == "bloom":
|
||||
tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif model == "opt":
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
elif model == "llama":
|
||||
tokenizer = LlamaTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
|
||||
tokenizer.pad_token = tokenizer.unk_token
|
||||
elif model == "chatglm":
|
||||
tokenizer = ChatGLMTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
|
||||
else:
|
||||
raise ValueError(f"Unsupported model '{model}'")
|
||||
return tokenizer
|
||||
|
||||
|
||||
def check_content(input_ids_stripped: torch.Tensor, tokenizer: PreTrainedTokenizer, model: str):
|
||||
if model == "opt":
|
||||
# NOTE: Contrary to GPT2, OPT adds the EOS token </s> to the beginning of every prompt.
|
||||
assert input_ids_stripped[0] == tokenizer.eos_token_id
|
||||
input_ids_stripped = input_ids_stripped[1:]
|
||||
elif model == "llama":
|
||||
assert input_ids_stripped[0] == tokenizer.bos_token_id
|
||||
input_ids_stripped = input_ids_stripped[1:]
|
||||
elif model == "chatglm":
|
||||
assert input_ids_stripped[0] == tokenizer.bos_token_id
|
||||
assert input_ids_stripped[-1] == tokenizer.eos_token_id
|
||||
input_ids_stripped = input_ids_stripped[1:-1]
|
||||
assert torch.all(input_ids_stripped != tokenizer.pad_token_id)
|
||||
assert torch.all(input_ids_stripped != tokenizer.bos_token_id)
|
||||
assert torch.all(input_ids_stripped != tokenizer.eos_token_id)
|
||||
assert input_ids_stripped != tokenizer.sep_token_id
|
||||
assert input_ids_stripped != tokenizer.cls_token_id
|
||||
if model == "chatglm":
|
||||
assert torch.all(input_ids_stripped != tokenizer.mask_token_id)
|
||||
else:
|
||||
assert input_ids_stripped != tokenizer.mask_token_id
|
||||
|
||||
|
||||
@pytest.mark.parametrize("model", ["gpt2", "bloom", "opt", "llama"])
|
||||
@pytest.mark.parametrize("max_length", [32, 1024])
|
||||
@pytest.mark.parametrize("max_datasets_size", [2])
|
||||
def test_prompt_dataset(model: str, max_datasets_size: int, max_length: int):
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
dataset_name = "prompt_dataset.json"
|
||||
with open(os.path.join(tmp_dir, dataset_name), "w") as f:
|
||||
json.dump(PROMPT_DATASET, f)
|
||||
tokenizer = make_tokenizer(model)
|
||||
assert tokenizer.padding_side in ("left", "right")
|
||||
prompt_dataset = PromptDataset(
|
||||
data_path=os.path.join(tmp_dir, dataset_name),
|
||||
tokenizer=tokenizer,
|
||||
max_datasets_size=max_datasets_size,
|
||||
max_length=max_length,
|
||||
)
|
||||
assert len(prompt_dataset) == min(max_datasets_size, len(PROMPT_DATASET))
|
||||
for i in range(len(prompt_dataset)):
|
||||
assert isinstance(prompt_dataset[i], dict)
|
||||
assert list(prompt_dataset[i].keys()) == ["input_ids", "attention_mask"]
|
||||
input_ids = prompt_dataset[i]["input_ids"]
|
||||
attention_mask = prompt_dataset[i]["attention_mask"]
|
||||
attention_mask = attention_mask.bool()
|
||||
assert input_ids.shape == attention_mask.shape == torch.Size([max_length])
|
||||
assert torch.all(input_ids[torch.logical_not(attention_mask)] == tokenizer.pad_token_id)
|
||||
check_content(input_ids.masked_select(attention_mask), tokenizer, model)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("model", ["gpt2", "bloom", "opt", "llama"])
|
||||
@pytest.mark.parametrize(
|
||||
["dataset_path", "subset"], [("Anthropic/hh-rlhf", "harmless-base"), ("Dahoas/rm-static", None)]
|
||||
)
|
||||
@pytest.mark.parametrize("max_datasets_size", [32])
|
||||
@pytest.mark.parametrize("max_length", [32, 1024])
|
||||
def test_reward_dataset(model: str, dataset_path: str, subset: Optional[str], max_datasets_size: int, max_length: int):
|
||||
data = load_dataset(dataset_path, data_dir=subset)
|
||||
assert max_datasets_size <= len(data["train"]) and max_datasets_size <= len(data["test"])
|
||||
train_data = data["train"].select(range(max_datasets_size))
|
||||
test_data = data["test"].select(range(max_datasets_size))
|
||||
tokenizer = make_tokenizer(model)
|
||||
assert tokenizer.padding_side in ("left", "right")
|
||||
|
||||
if dataset_path == "Anthropic/hh-rlhf":
|
||||
train_dataset = HhRlhfDataset(train_data, tokenizer, max_length)
|
||||
test_dataset = HhRlhfDataset(test_data, tokenizer, max_length)
|
||||
elif dataset_path == "Dahoas/rm-static":
|
||||
train_dataset = RmStaticDataset(train_data, tokenizer, max_length)
|
||||
test_dataset = RmStaticDataset(test_data, tokenizer, max_length)
|
||||
else:
|
||||
raise ValueError(f'Unsupported dataset "{dataset_path}"')
|
||||
|
||||
assert len(train_dataset) == len(test_dataset) == max_datasets_size
|
||||
for i in range(max_datasets_size):
|
||||
chosen_ids, c_mask, reject_ids, r_mask = train_dataset[i]
|
||||
assert chosen_ids.shape == c_mask.shape == reject_ids.shape == r_mask.shape == torch.Size([max_length])
|
||||
c_mask = c_mask.to(torch.bool)
|
||||
r_mask = r_mask.to(torch.bool)
|
||||
if chosen_ids.masked_select(c_mask)[-1] == tokenizer.eos_token_id:
|
||||
check_content(chosen_ids.masked_select(c_mask)[:-1], tokenizer, model)
|
||||
assert torch.all(chosen_ids.masked_select(torch.logical_not(c_mask)) == tokenizer.pad_token_id)
|
||||
else:
|
||||
check_content(chosen_ids.masked_select(c_mask), tokenizer, model)
|
||||
assert torch.all(c_mask)
|
||||
if reject_ids.masked_select(r_mask)[-1] == tokenizer.eos_token_id:
|
||||
check_content(reject_ids.masked_select(r_mask)[:-1], tokenizer, model)
|
||||
assert torch.all(reject_ids.masked_select(torch.logical_not(r_mask)) == tokenizer.pad_token_id)
|
||||
else:
|
||||
check_content(reject_ids.masked_select(r_mask), tokenizer, model)
|
||||
assert torch.all(r_mask)
|
||||
|
||||
chosen_ids, c_mask, reject_ids, r_mask = test_dataset[i]
|
||||
assert chosen_ids.shape == c_mask.shape == reject_ids.shape == r_mask.shape == torch.Size([max_length])
|
||||
c_mask = c_mask.to(torch.bool)
|
||||
r_mask = r_mask.to(torch.bool)
|
||||
if chosen_ids.masked_select(c_mask)[-1] == tokenizer.eos_token_id:
|
||||
check_content(chosen_ids.masked_select(c_mask)[:-1], tokenizer, model)
|
||||
assert torch.all(chosen_ids.masked_select(torch.logical_not(c_mask)) == tokenizer.pad_token_id)
|
||||
else:
|
||||
check_content(chosen_ids.masked_select(c_mask), tokenizer, model)
|
||||
assert torch.all(c_mask)
|
||||
if reject_ids.masked_select(r_mask)[-1] == tokenizer.eos_token_id:
|
||||
check_content(reject_ids.masked_select(r_mask)[:-1], tokenizer, model)
|
||||
assert torch.all(reject_ids.masked_select(torch.logical_not(r_mask)) == tokenizer.pad_token_id)
|
||||
else:
|
||||
check_content(reject_ids.masked_select(r_mask), tokenizer, model)
|
||||
assert torch.all(r_mask)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("model", ["gpt2", "bloom", "opt", "llama", "chatglm"])
|
||||
@pytest.mark.parametrize("dataset_path", ["yizhongw/self_instruct", None])
|
||||
@pytest.mark.parametrize("max_dataset_size", [2])
|
||||
@pytest.mark.parametrize("max_length", [32, 1024])
|
||||
def test_sft_dataset(model: str, dataset_path: Optional[str], max_dataset_size: int, max_length: int):
|
||||
tokenizer = make_tokenizer(model)
|
||||
if dataset_path == "yizhongw/self_instruct":
|
||||
data = load_dataset(dataset_path, "super_natural_instructions")
|
||||
train_data = data["train"].select(range(max_dataset_size))
|
||||
sft_dataset = SFTDataset(train_data, tokenizer, max_length)
|
||||
else:
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
dataset_name = "sft_dataset.json"
|
||||
with open(os.path.join(tmp_dir, dataset_name), "w") as f:
|
||||
json.dump(SFT_DATASET, f)
|
||||
sft_dataset = SupervisedDataset(
|
||||
tokenizer=tokenizer,
|
||||
data_path=os.path.join(tmp_dir, dataset_name),
|
||||
max_datasets_size=max_dataset_size,
|
||||
max_length=max_length,
|
||||
)
|
||||
assert len(sft_dataset) == min(max_dataset_size, len(SFT_DATASET))
|
||||
|
||||
if isinstance(tokenizer, ChatGLMTokenizer):
|
||||
for i in range(max_dataset_size):
|
||||
assert isinstance(sft_dataset[i], dict)
|
||||
assert list(sft_dataset[i].keys()) == ["input_ids", "labels"]
|
||||
input_ids = sft_dataset[i]["input_ids"]
|
||||
labels = sft_dataset[i]["labels"]
|
||||
assert input_ids.shape == labels.shape == torch.Size([max_length])
|
||||
|
||||
ignore_mask = labels == IGNORE_INDEX
|
||||
assert input_ids.masked_select(torch.logical_not(ignore_mask))[0] == tokenizer.bos_token_id
|
||||
check_content(input_ids.masked_select(torch.logical_not(ignore_mask)), tokenizer, model)
|
||||
return
|
||||
|
||||
for i in range(max_dataset_size):
|
||||
assert isinstance(sft_dataset[i], dict)
|
||||
assert list(sft_dataset[i].keys()) == ["input_ids", "labels", "attention_mask"]
|
||||
input_ids = sft_dataset[i]["input_ids"]
|
||||
labels = sft_dataset[i]["labels"]
|
||||
attention_mask = sft_dataset[i]["attention_mask"].to(torch.bool)
|
||||
assert input_ids.shape == labels.shape == attention_mask.shape == torch.Size([max_length])
|
||||
if input_ids.masked_select(attention_mask)[-1] == tokenizer.eos_token_id:
|
||||
check_content(input_ids.masked_select(attention_mask)[:-1], tokenizer, model)
|
||||
assert torch.all(input_ids.masked_select(torch.logical_not(attention_mask)) == tokenizer.pad_token_id)
|
||||
else:
|
||||
check_content(input_ids.masked_select(attention_mask), tokenizer, model)
|
||||
assert torch.all(attention_mask)
|
||||
ignore_mask = labels == IGNORE_INDEX
|
||||
prompt_mask = torch.logical_and(ignore_mask, attention_mask)
|
||||
check_content(input_ids.masked_select(prompt_mask), tokenizer, model)
|
||||
assert torch.all(input_ids.masked_select(ignore_mask ^ prompt_mask) == tokenizer.pad_token_id)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_sft_dataset(model="bloom", dataset_path="yizhongw/self_instruct", max_dataset_size=2, max_length=256)
|
||||
|
||||
test_reward_dataset(
|
||||
model="gpt2", dataset_path="Anthropic/hh-rlhf", subset="harmless-base", max_datasets_size=8, max_length=256
|
||||
)
|
||||
|
||||
test_prompt_dataset(model="opt", max_datasets_size=2, max_length=128)
|
|
@ -1,130 +0,0 @@
|
|||
import copy
|
||||
import os
|
||||
|
||||
import pytest
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from coati.experience_buffer import NaiveExperienceBuffer
|
||||
from coati.experience_maker import NaiveExperienceMaker
|
||||
from coati.models.base import RewardModel
|
||||
from coati.models.gpt import GPTActor, GPTCritic
|
||||
from coati.trainer.ppo import _set_default_generate_kwargs
|
||||
from coati.trainer.strategies import DDPStrategy, GeminiStrategy
|
||||
from coati.trainer.strategies.colossalai import LowLevelZeroStrategy
|
||||
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
|
||||
|
||||
from colossalai.testing import rerun_if_address_is_in_use, spawn
|
||||
|
||||
GPT_CONFIG = GPT2Config(n_embd=128, n_layer=4, n_head=4)
|
||||
|
||||
|
||||
def get_data(batch_size: int, seq_len: int = 10) -> dict:
|
||||
input_ids = torch.randint(0, 50257, (batch_size, seq_len), device="cuda")
|
||||
attention_mask = torch.ones_like(input_ids)
|
||||
return dict(input_ids=input_ids, attention_mask=attention_mask)
|
||||
|
||||
|
||||
def gather_and_equal(tensor: torch.Tensor) -> bool:
|
||||
world_size = dist.get_world_size()
|
||||
outputs = [torch.empty_like(tensor) for _ in range(world_size)]
|
||||
dist.all_gather(outputs, tensor.contiguous())
|
||||
for t in outputs[1:]:
|
||||
if not torch.equal(outputs[0], t):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def make_and_consume_experience(strategy):
|
||||
EXPERIENCE_BATCH_SIZE = 4
|
||||
SAMPLE_BATCH_SIZE = 2
|
||||
|
||||
if strategy == "ddp":
|
||||
strategy = DDPStrategy()
|
||||
elif strategy == "colossalai-zero2":
|
||||
strategy = LowLevelZeroStrategy()
|
||||
elif strategy == "colossalai-gemini":
|
||||
strategy = GeminiStrategy(placement_policy="static")
|
||||
else:
|
||||
raise ValueError(f'Unsupported strategy "{strategy}"')
|
||||
|
||||
with strategy.model_init_context():
|
||||
actor = GPTActor(config=GPT_CONFIG).cuda()
|
||||
critic = GPTCritic(config=GPT_CONFIG).cuda()
|
||||
|
||||
initial_model = GPTActor(config=GPT_CONFIG).cuda()
|
||||
reward_model = RewardModel(model=copy.deepcopy(critic.model)).cuda()
|
||||
|
||||
actor, critic, initial_model, reward_model = strategy.prepare(actor, critic, initial_model, reward_model)
|
||||
|
||||
class MockTokenizer:
|
||||
def __init__(self):
|
||||
self.padding_side = "left"
|
||||
self.eos_token_id = 0
|
||||
self.pad_token_id = 0
|
||||
|
||||
tokenizer = MockTokenizer()
|
||||
experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, tokenizer)
|
||||
data_buffer = NaiveExperienceBuffer(SAMPLE_BATCH_SIZE, cpu_offload=False)
|
||||
|
||||
generate_kwargs = dict(do_sample=True, max_length=16)
|
||||
generate_kwargs = _set_default_generate_kwargs(strategy, generate_kwargs, actor)
|
||||
|
||||
# experience of all ranks should be the same
|
||||
for _ in range(2):
|
||||
data = get_data(EXPERIENCE_BATCH_SIZE)
|
||||
assert gather_and_equal(data["input_ids"])
|
||||
assert gather_and_equal(data["attention_mask"])
|
||||
experience = experience_maker.make_experience(**data, do_sample=True, max_length=16)
|
||||
assert gather_and_equal(experience.sequences)
|
||||
assert gather_and_equal(experience.action_log_probs)
|
||||
assert gather_and_equal(experience.values)
|
||||
assert gather_and_equal(experience.reward)
|
||||
assert gather_and_equal(experience.advantages)
|
||||
assert gather_and_equal(experience.action_mask)
|
||||
assert gather_and_equal(experience.attention_mask)
|
||||
data_buffer.append(experience)
|
||||
|
||||
# data buffer's data should be the same
|
||||
buffer_size = torch.tensor([len(data_buffer)], device="cuda")
|
||||
assert gather_and_equal(buffer_size)
|
||||
for item in data_buffer.items:
|
||||
assert gather_and_equal(item.sequences)
|
||||
assert gather_and_equal(item.action_log_probs)
|
||||
assert gather_and_equal(item.values)
|
||||
assert gather_and_equal(item.reward)
|
||||
assert gather_and_equal(item.advantages)
|
||||
assert gather_and_equal(item.action_mask)
|
||||
assert gather_and_equal(item.attention_mask)
|
||||
|
||||
# dataloader of each rank should have the same size and different batch
|
||||
dataloader = strategy.setup_dataloader(data_buffer)
|
||||
dataloader_size = torch.tensor([len(dataloader)], device="cuda")
|
||||
assert gather_and_equal(dataloader_size)
|
||||
for experience in dataloader:
|
||||
assert not gather_and_equal(experience.sequences)
|
||||
assert not gather_and_equal(experience.action_log_probs)
|
||||
assert not gather_and_equal(experience.values)
|
||||
assert not gather_and_equal(experience.reward)
|
||||
assert not gather_and_equal(experience.advantages)
|
||||
# action mask and attention mask may be same
|
||||
|
||||
|
||||
def run_dist(rank, world_size, port, strategy):
|
||||
os.environ["RANK"] = str(rank)
|
||||
os.environ["LOCAL_RANK"] = str(rank)
|
||||
os.environ["WORLD_SIZE"] = str(world_size)
|
||||
os.environ["MASTER_ADDR"] = "localhost"
|
||||
os.environ["MASTER_PORT"] = str(port)
|
||||
make_and_consume_experience(strategy)
|
||||
|
||||
|
||||
@pytest.mark.dist
|
||||
@pytest.mark.parametrize("world_size", [2])
|
||||
@pytest.mark.parametrize("strategy", ["ddp", "colossalai-zero2", "colossalai-gemini"])
|
||||
@rerun_if_address_is_in_use()
|
||||
def test_experience(world_size, strategy):
|
||||
spawn(run_dist, world_size, strategy=strategy)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_experience(2, "colossalai-zero2")
|
|
@ -1,11 +0,0 @@
|
|||
set -xue
|
||||
|
||||
BASE_DIR=$(dirname $(dirname $(realpath $BASH_SOURCE)))
|
||||
EXAMPLES_DIR=$BASE_DIR/examples
|
||||
|
||||
echo "[Test]: testing inference ..."
|
||||
|
||||
# HACK: skip llama due to oom
|
||||
for model in 'gpt2' 'bloom' 'opt'; do
|
||||
python $EXAMPLES_DIR/inference.py --model $model
|
||||
done
|
|
@ -1,245 +0,0 @@
|
|||
import copy
|
||||
from typing import Any, Callable, Dict, Tuple
|
||||
|
||||
import pytest
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from coati.models.base import Actor, Critic, RewardModel, get_base_model
|
||||
from coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic
|
||||
from coati.models.chatglm import ChatGLMActor
|
||||
from coati.models.chatglm.chatglm_tokenizer import ChatGLMTokenizer
|
||||
from coati.models.generation import generate
|
||||
from coati.models.gpt import GPTRM, GPTActor, GPTCritic
|
||||
from coati.models.llama import LlamaActor
|
||||
from coati.models.lora import LoraLinear, convert_to_lora_module
|
||||
from coati.models.loss import GPTLMLoss, LogExpLoss, LogSigLoss, PolicyLoss, ValueLoss
|
||||
from coati.models.opt import OPTRM, OPTActor, OPTCritic
|
||||
from coati.models.utils import calc_action_log_probs, masked_mean
|
||||
|
||||
|
||||
@pytest.mark.parametrize("batch_size", [4])
|
||||
@pytest.mark.parametrize("seq_len", [32])
|
||||
@pytest.mark.parametrize(
|
||||
"actor_maker",
|
||||
[
|
||||
lambda: BLOOMActor(),
|
||||
lambda: GPTActor(),
|
||||
# HACK: skip llama due to long execution time
|
||||
# lambda: LlamaActor(),
|
||||
lambda: OPTActor(),
|
||||
],
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
"generate_kwargs",
|
||||
[
|
||||
{
|
||||
"max_length": 64,
|
||||
"use_cache": True,
|
||||
"do_sample": True,
|
||||
"temperature": 1.0,
|
||||
"top_k": 50,
|
||||
}
|
||||
],
|
||||
)
|
||||
def test_generation(actor_maker: Callable[[], Actor], batch_size: int, seq_len: int, generate_kwargs: Dict[str, Any]):
|
||||
class MockTokenizer:
|
||||
def __init__(self):
|
||||
self.padding_side = "left"
|
||||
self.eos_token_id = 0
|
||||
self.pad_token_id = 0
|
||||
|
||||
actor = actor_maker()
|
||||
input_ids = torch.randint(0, 100, (batch_size, seq_len)).cuda()
|
||||
tokenizer = MockTokenizer()
|
||||
sequences = generate(actor.cuda(), input_ids, tokenizer, **generate_kwargs)
|
||||
assert sequences.shape == (batch_size, generate_kwargs["max_length"])
|
||||
|
||||
|
||||
def test_utils():
|
||||
fn_input = {"tensor": torch.ones((10,)), "mask": torch.randint(0, 2, (10,))}
|
||||
fn_output = masked_mean(dim=0, **fn_input)
|
||||
assert fn_output.dim() == 0
|
||||
assert torch.allclose(fn_output, torch.tensor(1.0))
|
||||
|
||||
batch_size = 4
|
||||
seq_len = 32
|
||||
num_labels = 10
|
||||
num_actions = 2
|
||||
fn_input = {
|
||||
"logits": torch.randn((batch_size, seq_len, num_labels)),
|
||||
"sequences": torch.randint(0, num_labels, (batch_size, seq_len)),
|
||||
"num_actions": num_actions,
|
||||
}
|
||||
fn_output = calc_action_log_probs(**fn_input)
|
||||
assert fn_output.shape == (batch_size, num_actions)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("lora_rank", [4])
|
||||
@pytest.mark.parametrize("num_dim", [32])
|
||||
@pytest.mark.parametrize("num_layers", [4])
|
||||
def test_lora(lora_rank: int, num_dim: int, num_layers: int):
|
||||
model = nn.ModuleList([nn.Linear(num_dim, num_dim) for _ in range(num_layers)])
|
||||
lora_model = convert_to_lora_module(model, lora_rank)
|
||||
assert isinstance(lora_model, nn.ModuleList)
|
||||
for i in range(num_layers):
|
||||
assert isinstance(lora_model[i], LoraLinear)
|
||||
assert lora_model[i].lora_A.shape == (lora_rank, num_dim)
|
||||
assert lora_model[i].lora_B.shape == (num_dim, lora_rank)
|
||||
|
||||
old_model = copy.deepcopy(lora_model)
|
||||
for i in range(num_layers):
|
||||
assert isinstance(lora_model[i], LoraLinear)
|
||||
assert torch.allclose(old_model[i].weight, lora_model[i].weight)
|
||||
assert torch.allclose(old_model[i].bias, lora_model[i].bias)
|
||||
assert torch.allclose(old_model[i].lora_B @ old_model[i].lora_A, lora_model[i].lora_B @ lora_model[i].lora_A)
|
||||
optimizer = torch.optim.Adam(lora_model.parameters())
|
||||
x = torch.randn(8, num_dim)
|
||||
for i in range(num_layers):
|
||||
x = lora_model[i](x)
|
||||
loss = x.sum()
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
for i in range(num_layers):
|
||||
assert isinstance(lora_model[i], LoraLinear)
|
||||
assert torch.allclose(old_model[i].weight, lora_model[i].weight)
|
||||
assert torch.allclose(old_model[i].bias, lora_model[i].bias)
|
||||
assert not torch.allclose(
|
||||
old_model[i].lora_B @ old_model[i].lora_A, lora_model[i].lora_B @ lora_model[i].lora_A
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("batch_size", [8])
|
||||
@pytest.mark.parametrize("seq_len", [128])
|
||||
@pytest.mark.parametrize(
|
||||
"models_maker",
|
||||
[
|
||||
lambda: (BLOOMActor(), BLOOMCritic(), BLOOMRM()),
|
||||
lambda: (GPTActor(), GPTCritic(), GPTRM()),
|
||||
# HACK: skip llama due to long execution time
|
||||
# lambda: (LlamaActor(), LlamaCritic(), LlamaRM()),
|
||||
lambda: (OPTActor(), OPTCritic(), OPTRM()),
|
||||
lambda: (ChatGLMActor(), None, None),
|
||||
],
|
||||
)
|
||||
@torch.no_grad()
|
||||
def test_models(models_maker: Callable[[], Tuple[Actor, Critic, RewardModel]], batch_size: int, seq_len: int):
|
||||
actor_input = {
|
||||
"input_ids": torch.randint(0, 100, (batch_size, seq_len)),
|
||||
"attention_mask": torch.randint(0, 2, (batch_size, seq_len)),
|
||||
}
|
||||
critic_input = {
|
||||
"sequences": torch.randint(0, 100, (batch_size, seq_len)),
|
||||
"attention_mask": torch.randint(0, 2, (batch_size, seq_len)),
|
||||
}
|
||||
rm_input = {
|
||||
"sequences": torch.randint(0, 100, (batch_size, seq_len)),
|
||||
"attention_mask": torch.randint(0, 2, (batch_size, seq_len)),
|
||||
}
|
||||
|
||||
actor, critic, rm = models_maker()
|
||||
if isinstance(actor, ChatGLMActor):
|
||||
actor = actor.float()
|
||||
tokenizer = ChatGLMTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
|
||||
chatglm_special_token = torch.tensor([tokenizer.gmask_token_id, tokenizer.bos_token_id]).repeat(batch_size, 1)
|
||||
actor_input = {
|
||||
"input_ids": torch.cat(
|
||||
(
|
||||
torch.randint(0, 100, (batch_size, seq_len // 2)),
|
||||
chatglm_special_token,
|
||||
torch.randint(0, 100, (batch_size, seq_len // 2 - 2)),
|
||||
),
|
||||
dim=1,
|
||||
),
|
||||
"attention_mask": torch.randint(0, 2, (batch_size, 1, seq_len, seq_len)),
|
||||
}
|
||||
assert isinstance(actor, Actor)
|
||||
get_base_model(actor)
|
||||
actor_output = actor(**actor_input)
|
||||
assert actor_output.logits.shape[:2] == (batch_size, seq_len)
|
||||
|
||||
if critic:
|
||||
assert isinstance(critic, Critic)
|
||||
get_base_model(critic)
|
||||
critic_output = critic(**critic_input)
|
||||
assert critic_output.shape == (batch_size,)
|
||||
|
||||
if rm:
|
||||
assert isinstance(rm, RewardModel)
|
||||
get_base_model(rm)
|
||||
rm_output = rm(**rm_input)
|
||||
assert rm_output.shape == (batch_size,)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("batch_size", [16])
|
||||
@pytest.mark.parametrize("seq_len", [128])
|
||||
@pytest.mark.parametrize("num_labels", [100])
|
||||
def test_loss(batch_size: int, seq_len: int, num_labels: int):
|
||||
loss = GPTLMLoss()
|
||||
loss_input = {
|
||||
"logits": torch.randn(batch_size, seq_len, num_labels),
|
||||
"labels": torch.randint(0, num_labels, (batch_size, seq_len)),
|
||||
}
|
||||
loss(**loss_input)
|
||||
|
||||
loss = PolicyLoss()
|
||||
loss_input = {
|
||||
"log_probs": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
"old_log_probs": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
"advantages": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
}
|
||||
loss(**loss_input)
|
||||
|
||||
loss = ValueLoss()
|
||||
loss_input = {
|
||||
"values": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
"old_values": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
"reward": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
}
|
||||
loss(**loss_input)
|
||||
|
||||
loss = LogSigLoss()
|
||||
loss_input = {
|
||||
"chosen_reward": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
"reject_reward": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
}
|
||||
loss(**loss_input)
|
||||
|
||||
loss = LogExpLoss()
|
||||
loss_input = {
|
||||
"chosen_reward": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
"reject_reward": torch.randn(
|
||||
batch_size,
|
||||
),
|
||||
}
|
||||
loss(**loss_input)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
generate_kwargs = dict(max_length=40, use_cache=True, do_sample=True, temperature=1.0, top_k=50)
|
||||
test_generation(lambda: LlamaActor(), batch_size=4, seq_len=32, generate_kwargs=generate_kwargs)
|
||||
|
||||
test_utils()
|
||||
|
||||
test_lora(lora_rank=2, num_dim=8, num_layers=2)
|
||||
|
||||
test_models(models_maker=lambda: (BLOOMActor(), BLOOMCritic(), BLOOMRM()), batch_size=8, seq_len=128)
|
||||
|
||||
test_loss(batch_size=8, seq_len=128, num_labels=100)
|
|
@ -1,233 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set_n_least_used_CUDA_VISIBLE_DEVICES() {
|
||||
local n=${1:-"9999"}
|
||||
echo "GPU Memory Usage:"
|
||||
local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv |
|
||||
tail -n +2 |
|
||||
nl -v 0 |
|
||||
tee /dev/tty |
|
||||
sort -g -k 2 |
|
||||
awk '{print $1}' |
|
||||
head -n $n)
|
||||
export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g')
|
||||
echo "Now CUDA_VISIBLE_DEVICES is set to:"
|
||||
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
|
||||
}
|
||||
|
||||
set_n_least_used_CUDA_VISIBLE_DEVICES 4
|
||||
|
||||
set -xu
|
||||
|
||||
if [ -z "$SFT_DATASET" ]; then
|
||||
echo "Please set \$SFT_DATASET to the path to sft dataset."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$PROMPT_DATASET" ]; then
|
||||
echo "Please set \$PROMPT_DATASET to the path to prompts csv."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$PRETRAIN_DATASET" ]; then
|
||||
echo "Please set \$PRETRAIN_DATASET to the path to alpaca data."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
NUM_RETRY=3
|
||||
BASE_DIR=$(dirname $(dirname $(realpath $BASH_SOURCE)))
|
||||
EXAMPLES_DIR=$BASE_DIR/examples
|
||||
MODELS_DIR=$BASE_DIR/examples/models_config
|
||||
MODELS=('gpt2' 'bloom' 'opt' 'llama')
|
||||
STRATEGIES=('ddp' 'colossalai_gemini' 'colossalai_zero2')
|
||||
|
||||
|
||||
export OMP_NUM_THREADS=8
|
||||
|
||||
# install requirements
|
||||
pip install -r $EXAMPLES_DIR/requirements.txt
|
||||
|
||||
python $EXAMPLES_DIR/download_model.py --model-dir $MODELS_DIR --config-only
|
||||
|
||||
get_pretrain() {
|
||||
local model=$1
|
||||
if [[ $model == "gpt2" ]]; then
|
||||
echo "gpt2"
|
||||
elif [[ $model == "bloom" ]]; then
|
||||
echo "bigscience/bloom-560m"
|
||||
elif [[ $model == "opt" ]]; then
|
||||
echo "facebook/opt-350m"
|
||||
else
|
||||
echo "Unknown model $model"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
random_choice() {
|
||||
local arr=("$@")
|
||||
local len=${#arr[@]}
|
||||
local idx=$((RANDOM % len))
|
||||
echo ${arr[$idx]}
|
||||
}
|
||||
|
||||
echo "[Test]: testing sft ..."
|
||||
|
||||
# FIXME: This is a hack to skip tests that are not working
|
||||
# - gpt2-ddp: RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
|
||||
# - llama-*: These tests can be passed locally, skipped for long execution time
|
||||
# - *-gemini: Gemini plugin does not support `from_pretrained` yet
|
||||
SKIPPED_TESTS=(
|
||||
"gpt2-ddp"
|
||||
"llama-ddp"
|
||||
"llama-colossalai_gemini"
|
||||
"llama-colossalai_zero2"
|
||||
)
|
||||
|
||||
GRAD_CKPTS=('' '--grad_checkpoint')
|
||||
for lora_rank in '0'; do
|
||||
for model in ${MODELS[@]}; do
|
||||
strategies=($(shuf -e "${STRATEGIES[@]}"))
|
||||
for strategy in ${strategies[@]}; do
|
||||
if [[ " ${SKIPPED_TESTS[*]} " =~ " $model-$strategy-$lora_rank " ]]; then
|
||||
echo "[Test]: Skipped $model-$strategy-$lora_rank"
|
||||
continue
|
||||
elif [[ " ${SKIPPED_TESTS[*]} " =~ " $model-$strategy " ]]; then
|
||||
echo "[Test]: Skipped $model-$strategy"
|
||||
continue
|
||||
fi
|
||||
pretrain=$(get_pretrain $model)
|
||||
pretrain_model=""
|
||||
if [[ $lora_rank -gt 0 ]]; then
|
||||
pretrain_model="--pretrain $pretrain"
|
||||
fi
|
||||
grad_ckpt=$(random_choice "${GRAD_CKPTS[@]}")
|
||||
for i in $(seq $NUM_RETRY); do
|
||||
echo "[Test]: $model-$strategy-$lora_rank, attempt $i"
|
||||
torchrun --standalone --nproc_per_node=4 $EXAMPLES_DIR/train_sft.py \
|
||||
$pretrain_model --tokenizer $MODELS_DIR/$model \
|
||||
--model $model --strategy $strategy --lora_rank $lora_rank $grad_ckpt \
|
||||
--dataset $SFT_DATASET --max_datasets_size 8 \
|
||||
--max_epochs 1 --batch_size 1 --accumulation_steps 1 --lr 1e-8 \
|
||||
--save_path $EXAMPLES_DIR/rlhf_models/sft_ckpt_${model}_${lora_rank}
|
||||
passed=$?
|
||||
if [ $passed -eq 0 ]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ $passed -ne 0 ]; then
|
||||
echo "[Test]: Failed $model-$strategy-$lora_rank"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
done
|
||||
done
|
||||
|
||||
echo "[Test]: testing reward model ..."
|
||||
|
||||
# FIXME: This is a hack to skip tests that are not working
|
||||
# - gpt2-ddp: RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
|
||||
# - llama-*: These tests can be passed locally, skipped for long execution time
|
||||
# - *-gemini: Gemini plugin does not support `from_pretrained` yet
|
||||
SKIPPED_TESTS=(
|
||||
"gpt2-ddp"
|
||||
"llama-ddp"
|
||||
"llama-colossalai_gemini"
|
||||
"llama-colossalai_zero2"
|
||||
)
|
||||
|
||||
LOSS_FNS=('log_sig' 'log_exp')
|
||||
DATASETS=('Anthropic/hh-rlhf' 'Dahoas/rm-static')
|
||||
for lora_rank in '0'; do
|
||||
for model in ${MODELS[@]}; do
|
||||
strategies=($(shuf -e "${STRATEGIES[@]}"))
|
||||
for strategy in ${strategies[@]}; do
|
||||
if [[ " ${SKIPPED_TESTS[*]} " =~ " $model-$strategy-$lora_rank " ]]; then
|
||||
echo "[Test]: Skipped $model-$strategy-$lora_rank"
|
||||
continue
|
||||
elif [[ " ${SKIPPED_TESTS[*]} " =~ " $model-$strategy " ]]; then
|
||||
echo "[Test]: Skipped $model-$strategy"
|
||||
continue
|
||||
fi
|
||||
pretrain=$(get_pretrain $model)
|
||||
pretrain_model=""
|
||||
if [[ $lora_rank -gt 0 ]]; then
|
||||
pretrain_model="--pretrain $pretrain"
|
||||
fi
|
||||
loss_fn=$(random_choice "${LOSS_FNS[@]}")
|
||||
dataset=$(random_choice "${DATASETS[@]}")
|
||||
subset=$(if [[ $dataset == "Dahoas/rm-static" ]]; then echo "None"; else echo "harmless-base"; fi)
|
||||
for i in $(seq $NUM_RETRY); do
|
||||
echo "[Test]: $model-$strategy-$lora_rank, attempt $i"
|
||||
torchrun --standalone --nproc_per_node=4 $EXAMPLES_DIR/train_reward_model.py \
|
||||
$pretrain_model --tokenizer $MODELS_DIR/$model \
|
||||
--dataset $dataset --subset $subset --max_datasets_size 8 \
|
||||
--model $model --strategy $strategy --lora_rank $lora_rank \
|
||||
--loss_fn $loss_fn --batch_size 1 --lr 1e-8 \
|
||||
--save_path $EXAMPLES_DIR/rlhf_models/rm_ckpt_${model}_${lora_rank}.pt
|
||||
passed=$?
|
||||
if [ $passed -eq 0 ]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ $passed -ne 0 ]; then
|
||||
echo "[Test]: Failed to train reward model $model-$strategy-$lora_rank"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
done
|
||||
done
|
||||
|
||||
echo "[Test]: testing RLHF ..."
|
||||
|
||||
# FIXME: This is a hack to skip tests that are not working
|
||||
# - gpt2-ddp: RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
|
||||
# - llama-*: These tests can be passed locally, skipped for long execution time
|
||||
# - *-gemini: Gemini plugin does not support `from_pretrained` yet
|
||||
SKIPPED_TESTS=(
|
||||
"gpt2-ddp"
|
||||
"llama-ddp"
|
||||
"llama-colossalai_gemini"
|
||||
"llama-colossalai_zero2"
|
||||
)
|
||||
|
||||
for model in ${MODELS[@]}; do
|
||||
for lora_rank in '0'; do
|
||||
strategies=($(shuf -e "${STRATEGIES[@]}"))
|
||||
for strategy in ${strategies[@]}; do
|
||||
if [[ " ${SKIPPED_TESTS[*]} " =~ " $model-$strategy-$lora_rank " ]]; then
|
||||
echo "[Test]: Skipped $model-$strategy-$lora_rank"
|
||||
continue
|
||||
elif [[ " ${SKIPPED_TESTS[*]} " =~ " $model-$strategy " ]]; then
|
||||
echo "[Test]: Skipped $model-$strategy"
|
||||
continue
|
||||
fi
|
||||
rm_pretrain=$(get_pretrain $model)
|
||||
rm_pretrain_model=""
|
||||
if [[ $lora_rank -gt 0 ]]; then
|
||||
rm_pretrain_model="--rm_pretrain $rm_pretrain"
|
||||
fi
|
||||
for i in $(seq $NUM_RETRY); do
|
||||
echo "[Test]: $model-$strategy-$lora_rank, attempt $i"
|
||||
torchrun --standalone --nproc_per_node=4 $EXAMPLES_DIR/train_prompts.py \
|
||||
--prompt_dataset $PROMPT_DATASET --pretrain_dataset $PRETRAIN_DATASET --max_datasets_size 32 \
|
||||
--strategy $strategy --model $model --tokenizer $MODELS_DIR/$model \
|
||||
--num_episodes 1 --num_collect_steps 1 --num_update_steps 1 --lr 1e-8 \
|
||||
--experience_batch_size 2 --train_batch_size 1 --lora_rank $lora_rank \
|
||||
--pretrain $EXAMPLES_DIR/rlhf_models/sft_ckpt_${model}_${lora_rank} \
|
||||
$rm_pretrain_model --rm_path $EXAMPLES_DIR/rlhf_models/rm_ckpt_${model}_${lora_rank}.pt \
|
||||
--save_path $EXAMPLES_DIR/rlhf_models/actor_checkpoint_prompts
|
||||
passed=$?
|
||||
if [ $passed -eq 0 ]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ $passed -ne 0 ]; then
|
||||
echo "[Test]: Failed to train RLHF $model-$strategy-$lora_rank"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
rm -rf $EXAMPLES_DIR/rlhf_models/sft_ckpt_${model}_${lora_rank}
|
||||
rm $EXAMPLES_DIR/rlhf_models/rm_ckpt_${model}_${lora_rank}.pt
|
||||
done
|
||||
done
|
||||
rm -rf $EXAMPLES_DIR/rlhf_models/actor_checkpoint_prompts
|
|
@ -8,11 +8,10 @@ import argparse
|
|||
|
||||
import numpy as np
|
||||
import torch
|
||||
from transformers import LlamaTokenizer, LlamaForCausalLM
|
||||
from transformers import LlamaForCausalLM, LlamaTokenizer
|
||||
|
||||
from colossalai.logging import get_dist_logger
|
||||
|
||||
|
||||
logger = get_dist_logger()
|
||||
|
||||
|
||||
|
|
|
@ -10,8 +10,8 @@ import os
|
|||
from typing import Any, Dict, Tuple, Union
|
||||
|
||||
import torch
|
||||
from torch.optim.optimizer import Optimizer
|
||||
from torch.optim.lr_scheduler import _LRScheduler
|
||||
from torch.optim.optimizer import Optimizer
|
||||
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.cluster import DistCoordinator
|
||||
|
|
|
@ -0,0 +1,252 @@
|
|||
from copy import deepcopy
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from transformers import PreTrainedTokenizer
|
||||
from transformers.generation.utils import GenerationConfig, LogitsProcessorList, StoppingCriteriaList
|
||||
from transformers.utils import logging
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
def get_prompt_template(
|
||||
input_query: str,
|
||||
history: List[Dict] = None,
|
||||
roles: list = ["", "Human", "Assistant"],
|
||||
) -> str:
|
||||
"""
|
||||
Generates a prompt template for chat models based on input and history.
|
||||
|
||||
Args:
|
||||
input_query (str): User's current input query.
|
||||
history (List[Dict], optional): List of past conversations, each a dict with 'role' and 'message'.
|
||||
roles (list): Specifies the roles in the conversation, defaults to ["", "Human", "Assistant"].
|
||||
|
||||
Returns:
|
||||
str: A formatted prompt including the input query and history.
|
||||
"""
|
||||
prompt = ""
|
||||
if history is None:
|
||||
new_history = []
|
||||
else:
|
||||
new_history = deepcopy(history)
|
||||
|
||||
new_history.append({"role": roles[1], "message": input_query.strip()})
|
||||
new_history.append({"role": roles[2], "message": None})
|
||||
|
||||
for _, item in enumerate(new_history):
|
||||
role = item.get("role")
|
||||
message = item.get("message")
|
||||
if role == roles[0]:
|
||||
prompt += f"<s>{message}\n\n"
|
||||
else:
|
||||
if message:
|
||||
prompt += f"{role}: <s>{message}</s>"
|
||||
else:
|
||||
prompt += f"{role}: <s>"
|
||||
return prompt
|
||||
|
||||
|
||||
@torch.inference_mode()
|
||||
def streaming_chat(
|
||||
model: Any,
|
||||
tokenizer: PreTrainedTokenizer,
|
||||
input_query: str,
|
||||
history: List[Dict] = None,
|
||||
roles: list = ["", "Human", "Assistant"],
|
||||
past_key_values: Tuple[Tuple[torch.FloatTensor, Any], Any] = None,
|
||||
temperature: float = 0.8,
|
||||
top_p: float = 0.95,
|
||||
top_k: int = 50,
|
||||
do_sample: bool = True,
|
||||
length_penalty: float = 1.2,
|
||||
max_new_tokens: int = 512,
|
||||
logits_processor: LogitsProcessorList = None,
|
||||
return_past_key_values: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Streaming chat responses generation with a given model and tokenizer.
|
||||
|
||||
Args:
|
||||
model (Any): The language model to generate responses.
|
||||
tokenizer (PreTrainedTokenizer): Tokenizer compatible with the model, used for encoding inputs and decoding responses.
|
||||
input_query (str): The current user input to respond to.
|
||||
history (List[Dict], optional): A list of past conversations, where each conversation is a dictionary with keys 'role' and 'message'.
|
||||
roles (list): Roles involved in the conversation, defaults to ["", "Human", "Assistant"].
|
||||
past_key_values (Tuple[Tuple[torch.FloatTensor, Any], Any], optional): Past key values for incremental decoding.
|
||||
temperature (float): The temperature value for token sampling, defaults to 0.8.
|
||||
top_p (float): Nucleus sampling probability threshold, defaults to 0.95.
|
||||
top_k (int): Top-K filtering threshold, defaults to 50.
|
||||
do_sample (bool): Whether to sample responses, defaults to True.
|
||||
length_penalty (float): Penalty for response length, defaults to 1.2.
|
||||
max_new_tokens (int): Maximum number of new tokens to generate, defaults to 512.
|
||||
logits_processor (LogitsProcessorList, optional): Custom logits processors, defaults to None.
|
||||
return_past_key_values (bool): Whether to return past key values for further incremental decoding, defaults to False.
|
||||
**kwargs: Additional keyword arguments for generation.
|
||||
|
||||
Yields:
|
||||
Tuple[str, List[Dict], Optional[Tuple[Tuple[torch.FloatTensor, Any], Any]]]: A tuple containing the generated response, updated history, and
|
||||
optionally the updated past key values if `return_past_key_values` is True.
|
||||
|
||||
Ensures padding is on the left side for the tokenizer.
|
||||
"""
|
||||
assert tokenizer.padding_side == "left", "Current generation only supports left padding."
|
||||
if history is None:
|
||||
history = []
|
||||
if logits_processor is None:
|
||||
logits_processor = LogitsProcessorList()
|
||||
|
||||
generation_kwargs = {
|
||||
"temperature": temperature,
|
||||
"top_p": top_p,
|
||||
"top_k": top_k,
|
||||
"do_sample": do_sample,
|
||||
"max_new_tokens": max_new_tokens,
|
||||
"length_penalty": length_penalty,
|
||||
"use_cache": True,
|
||||
**kwargs,
|
||||
}
|
||||
|
||||
prompt_str = get_prompt_template(input_query, history=history, roles=roles)
|
||||
|
||||
eos_token_id = [tokenizer.eos_token_id]
|
||||
inputs = tokenizer(prompt_str, return_tensors="pt").to(model.device)
|
||||
history.append({"role": roles[1], "message": input_query.strip()})
|
||||
history.append({"role": roles[2], "message": None})
|
||||
|
||||
for outputs in stream_generate(
|
||||
model,
|
||||
**inputs,
|
||||
past_key_values=past_key_values,
|
||||
eos_token_id=eos_token_id,
|
||||
return_past_key_values=return_past_key_values,
|
||||
**generation_kwargs,
|
||||
):
|
||||
if return_past_key_values:
|
||||
outputs, past_key_values = outputs
|
||||
|
||||
outputs = outputs.tolist()[0][len(inputs["input_ids"][0]) : -1]
|
||||
response = tokenizer.decode(outputs)
|
||||
|
||||
history[-1]["message"] = response.strip()
|
||||
if return_past_key_values:
|
||||
yield response, history, past_key_values
|
||||
else:
|
||||
yield response, history
|
||||
|
||||
|
||||
@torch.inference_mode()
def stream_generate(
    model: Any,
    input_ids: torch.Tensor,
    generation_config: Optional[GenerationConfig] = None,
    logits_processor: Optional[LogitsProcessorList] = None,
    stopping_criteria: Optional[StoppingCriteriaList] = None,
    prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
    return_past_key_values: bool = False,
    **kwargs,
):
    """
    Generates sequences of token ids using the specified model and generation parameters.
    Adapted from https://huggingface.co/THUDM/chatglm3-6b/blob/main/modeling_chatglm.py

    Args:
        model (Any): The model used for generating sequences of token ids.
        input_ids (torch.Tensor): The sequence used as a prompt for the generation or as model inputs to the encoder.
        generation_config (Optional[GenerationConfig]): The generation configuration to be used as base parametrization
            for the generation call. Defaults to ``model.generation_config`` when not given.
        logits_processor (Optional[LogitsProcessorList]): Custom logits processors that complement the default logits
            processors built from arguments and generation config.
        stopping_criteria (Optional[StoppingCriteriaList]): Custom stopping criteria that complement the default
            stopping criteria built from arguments and a generation config.
        prefix_allowed_tokens_fn (Optional[Callable[[int, torch.Tensor], List[int]]]): Function to constrain token
            generation.
        return_past_key_values (bool): Whether to return past key values for further incremental decoding,
            defaults to False.
        **kwargs: Additional parameters for model generation (merged into the generation config).

    Yields:
        torch.Tensor: The generated token ids, updated after each generation step.
        Optional[Tuple[Tuple[torch.FloatTensor, Any], Any]]: The past key values, yielded alongside the token ids
            if `return_past_key_values` is True.
    """
    input_ids_len = input_ids.size(1)

    # Work on a private copy of the config so per-call kwargs never mutate the model's default config.
    if generation_config is None:
        generation_config = model.generation_config
    generation_config = deepcopy(generation_config)
    model_kwargs = generation_config.update(**kwargs)

    # Normalize eos_token_id to a list; it may legitimately be None (no EOS-based stopping).
    eos_token_id = generation_config.eos_token_id
    if isinstance(eos_token_id, int):
        eos_token_id = [eos_token_id]
    eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None

    if generation_config.max_new_tokens is not None:
        generation_config.max_length = generation_config.max_new_tokens + input_ids_len

    if input_ids_len >= generation_config.max_length:
        input_ids_string = "decoder_input_ids" if model.config.is_encoder_decoder else "input_ids"
        logger.warning(
            f"Input length of {input_ids_string} is {input_ids_len}, but `max_length` is set to"
            f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
            " increasing `max_new_tokens`."
        )
    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

    # prepare distribution pre_processing samplers
    logits_processor = model._get_logits_processor(
        generation_config=generation_config,
        input_ids_seq_length=input_ids_len,
        encoder_input_ids=input_ids,
        prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
        logits_processor=logits_processor,
    )

    # prepare stopping criteria
    stopping_criteria = model._get_stopping_criteria(
        generation_config=generation_config, stopping_criteria=stopping_criteria
    )

    logits_warper = model._get_logits_warper(generation_config)
    # 1 = still generating, 0 = finished; one flag per sequence in the batch.
    unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
    scores = None

    while True:
        model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)
        # forward pass to get next token
        outputs = model(
            **model_inputs,
            return_dict=True,
            output_attentions=False,
            output_hidden_states=False,
        )

        # NOTE: this is correct only in left padding mode
        # pre-process distribution
        next_token_logits = outputs.logits[:, -1, :]
        next_token_scores = logits_processor(input_ids, next_token_logits)
        next_token_scores = logits_warper(input_ids, next_token_scores)

        # sample (softmax is monotonic, so argmax over probs equals argmax over scores for greedy decoding)
        probs = nn.functional.softmax(next_token_scores, dim=-1)
        if generation_config.do_sample:
            next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
        else:
            next_tokens = torch.argmax(probs, dim=-1)

        # update generated ids, model inputs, and length for next step
        input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
        model_kwargs = model._update_model_kwargs_for_generation(
            outputs, model_kwargs, is_encoder_decoder=model.config.is_encoder_decoder
        )
        # FIX: guard against eos_token_id being None — the original dereferenced
        # eos_token_id_tensor unconditionally, crashing with AttributeError when the
        # generation config defines no EOS token (mirrors HF GenerationMixin.sample).
        if eos_token_id_tensor is not None:
            unfinished_sequences = unfinished_sequences.mul(
                next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
            )

        if return_past_key_values:
            yield input_ids, outputs.past_key_values
        else:
            yield input_ids
        # stop when each sentence is finished, or if exceed the maximum length
        if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
            break
|
|
@ -43,7 +43,7 @@ if __name__ == '__main__':
|
|||
model.to(device)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("hpcai-tech/Colossal-LLaMA-2-13b-base")
|
||||
|
||||
|
||||
question = "xxx" # Your question.
|
||||
prompt = (f"请对下面的问题,给出准确、可靠、安全的回答\n"
|
||||
f"{question}"
|
||||
|
@ -111,7 +111,7 @@ The Colossal-AI team has conducted comprehensive evaluation for safety and value
|
|||
| 整车热管理主要包括什么内容 | 整车热管理主要包括:1、发动机冷却系统;2、散热系统;3、空调冷却系统;4、采暖系统;5、动力转向器冷却系统;6、润滑油冷却系统;7、变速箱冷却系统;8、燃油冷却系统;9、冷却系统排放液系统。 |
|
||||
| 简述下汽车空调-热泵循环系统的原理 | 汽车空调系统主要有三种形式,其中热泵循环系统是其中的一种。其原理是利用发动机排放的热量,通过压缩机将冷媒压缩成高温高压的状态,然后通过冷却器释放到空气中,吸收周围的热量,再通过膨胀阀释放到蒸发器中,蒸发器在压缩机的作用下将冷媒压缩成高温高压的状态,再次释放到空气中,从而实现制冷的效果。 |
|
||||
|
||||
#### Finance
|
||||
| Question | <center>Colossal-LLaMA-2-13b-base</center> |
|
||||
| :------: | :----------------------- |
|
||||
| 如何看待当前提前还贷热 | 提前还贷是一种常见的还贷方式,在市场中存在着许多的因素和影响,需要具体分析当前的经济形势和人们心理状况的影响。提前还贷可以帮助借款人早日摆脱债务负担,但也存在一些风险和不确定性。从经济形势来看,当前利率下探、还款压力减轻等因素可能会导致一部分借款人提前还贷。此外,一些购房者可能会受到市场不景气的影响,担心未来房产价值会下降,因此提前还贷以避免资产贬值。从借款人心理状况来看,提前还贷也可能会由于借款人对未来经济预期的不确定性而产生压力和不满。因此,提前还贷并不能一概而论,需要根据具体情况进行分析。如果提前还贷对借款人的经济情况和未来规划造成不利影响,建议谨慎考虑是否要提前还贷。|
|
||||
|
@ -150,4 +150,4 @@ The Colossal-AI team has conducted comprehensive evaluation for safety and value
|
|||
|
||||
|
||||
## Conclusion
|
||||
The Colossal-AI team's advanced 13B model, compared to the 7B version, features a refined data structure categorizing information into informative, functional, and memory replay data. Informative data is intricately subdivided into major categories, each further segmented for precise control. Concurrently, data scale across domains is expanded. Tailored enhancements meet community demands for large model capabilities in natural language processing tasks, ensuring proficiency during pre-training and cost-effective fine-tuning. Addressing security and values concerns, multidimensional controls are implemented, securing the baseline model and aligning it with correct values.
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue