mirror of https://github.com/hpcaitech/ColossalAI

Merge branch 'main' into feature/shardformer
commit a39a5c66fe
@@ -61,8 +61,8 @@ jobs:
run:
shell: bash
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- name: Copy testmon cache
run: | # branch name may contain slash, we need to replace it with space

@@ -87,8 +87,8 @@ jobs:
anyLibraryFileChanged: ${{ steps.find-lib-change.outputs.any_changed }}
runs-on: ubuntu-latest
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- uses: actions/checkout@v2
with:

@@ -147,8 +147,8 @@ jobs:
run:
shell: bash
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- name: Checkout TensorNVMe
uses: actions/checkout@v2
@@ -13,8 +13,8 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- uses: actions/checkout@v3
- id: set-matrix

@@ -44,8 +44,8 @@ jobs:
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10
timeout-minutes: 120
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- name: Install dependencies
run: |
@@ -17,8 +17,8 @@ jobs:
github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI'
runs-on: ubuntu-latest
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- uses: actions/checkout@v2

@@ -35,8 +35,8 @@ jobs:
github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI'
runs-on: ubuntu-latest
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- uses: actions/checkout@v2
with:
@@ -20,8 +20,8 @@ jobs:
any_changed: ${{ steps.changed-files.outputs.any_changed }}
changed_files: ${{ steps.changed-files.outputs.all_changed_files }}
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
name: Detect changed example files
steps:
- uses: actions/checkout@v3

@@ -63,8 +63,8 @@ jobs:
run:
shell: bash
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- name: Checkout ColossalAI-Documentation
uses: actions/checkout@v2
@@ -21,8 +21,8 @@ jobs:
anyChanged: ${{ steps.setup-matrix.outputs.anyChanged }}
name: Detect changed example files
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- uses: actions/checkout@v3
with:

@@ -81,8 +81,8 @@ jobs:
options: --gpus all --rm -v /data/scratch/examples-data:/data/
timeout-minutes: 10
concurrency:
group: ${{ github.head_ref }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
steps:
- uses: actions/checkout@v3
@@ -28,9 +28,8 @@ jobs:
- name: Checkout ColossalAI
uses: actions/checkout@v2

- name: Install ColossalAI and ChatGPT
- name: Install ChatGPT
run: |
pip install -e .
cd applications/Chat
pip install -v .
pip install -r examples/requirements.txt
@@ -30,9 +30,8 @@ jobs:
- name: Checkout ColossalAI
uses: actions/checkout@v2

- name: Install ColossalAI and ChatGPT
- name: Install ChatGPT
run: |
pip install -e .
cd applications/Chat
pip install -v .
pip install -r requirements-test.txt
README.md (13 changed lines)
@@ -25,6 +25,7 @@
</div>

## Latest News
* [2023/09] [70 Billion Parameter LLaMA2 Model Training Accelerated by 195%](https://www.hpc-ai.tech/blog/70b-llama2-training)
* [2023/07] [HPC-AI Tech Raises 22 Million USD in Series A Funding](https://www.hpc-ai.tech/blog/hpc-ai-tech-raises-22-million-usd-in-series-a-funding-to-fuel-team-expansion-and-business-growth)
* [2023/07] [65B Model Pretraining Accelerated by 38%, Best Practices for Building LLaMA-Like Base Models Open-Source](https://www.hpc-ai.tech/blog/large-model-pretraining)
* [2023/03] [ColossalChat: An Open-Source Solution for Cloning ChatGPT With a Complete RLHF Pipeline](https://medium.com/@yangyou_berkeley/colossalchat-an-open-source-solution-for-cloning-chatgpt-with-a-complete-rlhf-pipeline-5edf08fb538b)

@@ -50,7 +51,7 @@
<li>
<a href="#Parallel-Training-Demo">Parallel Training Demo</a>
<ul>
<li><a href="#LLaMA">LLaMA</a></li>
<li><a href="#LLaMA2">LLaMA 1/2</a></li>
<li><a href="#GPT-3">GPT-3</a></li>
<li><a href="#GPT-2">GPT-2</a></li>
<li><a href="#BERT">BERT</a></li>

@@ -217,8 +218,16 @@ Acceleration of [AlphaFold Protein Structure](https://alphafold.ebi.ac.uk/)
<p align="right">(<a href="#top">back to top</a>)</p>

## Parallel Training Demo
### LLaMA2
<p align="center">
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/llama2_pretraining.png" width=600/>
</p>

### LLaMA
- 70 billion parameter LLaMA2 model training accelerated by 195%
[[code]](https://github.com/hpcaitech/ColossalAI/tree/example/llama/examples/language/llama)
[[blog]](https://www.hpc-ai.tech/blog/70b-llama2-training)

### LLaMA1
<p align="center">
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/examples/images/LLaMA_pretraining.png" width=600/>
</p>
@@ -19,7 +19,7 @@ import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from transformers import PreTrainedTokenizer

from coati.models.chatglm.chatglm_tokenizer import ChatGLMTokenizer
from colossalai.logging import get_dist_logger

from .utils import is_rank_0, jload
@@ -71,6 +71,42 @@ def _preprocess(sources: Sequence[str],
    return sequences_token["input_ids"], labels, sequences_token["attention_mask"]


def _preprocess_chatglm(sources: Sequence[str],
                        targets: Sequence[str],
                        tokenizer: PreTrainedTokenizer,
                        max_length: int,
                        ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Preprocess the data by tokenizing.
    Returns None for the attention mask; ChatGLM computes its attention mask from the input ids.
    """

    labels = []
    input_ids = []
    for source, target in zip(sources, targets):
        source_id = tokenizer.encode(text=source, add_special_tokens=False)
        target_id = tokenizer.encode(text=target, add_special_tokens=False)
        input_id = tokenizer.build_inputs_with_special_tokens(source_id, target_id)
        # truncate
        sp_token_list = [tokenizer.gmask_token_id, tokenizer.bos_token_id]
        truncate_length = max(0, len(input_id) - max_length)
        input_id = input_id[truncate_length:]
        if truncate_length == len(source_id) + 1:
            input_id = sp_token_list + input_id[1:]
        elif truncate_length > len(source_id) + 1:
            input_id = sp_token_list + input_id[2:]

        context_length = input_id.index(tokenizer.bos_token_id)
        mask_position = context_length - 1
        label = [IGNORE_INDEX] * context_length + input_id[mask_position + 1:]

        pad_len = max_length - len(input_id)
        input_id = input_id + [tokenizer.pad_token_id] * pad_len
        input_ids.append(input_id)
        labels.append(label + [IGNORE_INDEX] * pad_len)
    return torch.tensor(input_ids), torch.tensor(labels), None


class SFTDataset(Dataset):
    """
    Dataset for sft model
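The `_preprocess_chatglm` hunk above masks every position before the ChatGLM BOS (`<sop>`) token with IGNORE_INDEX, so only the completion contributes to the loss, and right-pads both sequences to max_length. A minimal, self-contained sketch of that labelling rule follows; it is an editor-added illustration, not part of the diff. The token ids mirror the defaults in configuration_chatglm.py later in this diff ([gMASK]=130001, BOS=130004, pad=3), and IGNORE_INDEX is assumed to be the usual -100.

from typing import List, Tuple

IGNORE_INDEX = -100    # assumption: the real constant is defined in coati.dataset.sft_dataset


def mask_prompt_labels(input_id: List[int], bos_token_id: int, pad_token_id: int,
                       max_length: int) -> Tuple[List[int], List[int]]:
    # Ignore everything before the BOS token (prompt plus [gMASK]), supervise from BOS
    # onwards, then right-pad both sequences, mirroring the hunk above.
    context_length = input_id.index(bos_token_id)
    label = [IGNORE_INDEX] * context_length + input_id[context_length:]
    pad_len = max_length - len(input_id)
    return input_id + [pad_token_id] * pad_len, label + [IGNORE_INDEX] * pad_len


# toy sequence: prompt ids [11, 12], then [gMASK]=130001, <sop>/BOS=130004, completion [21, 22]
ids, labels = mask_prompt_labels([11, 12, 130001, 130004, 21, 22],
                                 bos_token_id=130004, pad_token_id=3, max_length=8)
assert labels == [IGNORE_INDEX] * 3 + [130004, 21, 22] + [IGNORE_INDEX] * 2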
@@ -94,18 +130,25 @@ class SFTDataset(Dataset):
            data["completion"] + tokenizer.eos_token
            for data in tqdm(dataset, disable=not is_rank_0())
        ]

        self.input_ids, self.labels, self.attention_mask = \
            _preprocess(sources, targets, tokenizer, max_length)
        if isinstance(tokenizer, ChatGLMTokenizer):
            self.input_ids, self.labels, self.attention_mask = \
                _preprocess_chatglm(sources, targets, tokenizer, max_length)
        else:
            self.input_ids, self.labels, self.attention_mask = \
                _preprocess(sources, targets, tokenizer, max_length)

    def __len__(self):
        length = self.input_ids.shape[0]
        return length

    def __getitem__(self, idx):
        return dict(input_ids=self.input_ids[idx],
                    labels=self.labels[idx],
                    attention_mask=self.attention_mask[idx])
        if self.attention_mask is not None:
            return dict(input_ids=self.input_ids[idx],
                        labels=self.labels[idx],
                        attention_mask=self.attention_mask[idx])
        else:
            return dict(input_ids=self.input_ids[idx],
                        labels=self.labels[idx])


class SupervisedDataset(Dataset):
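Because `_preprocess_chatglm` returns no attention mask, ChatGLM items carry only input_ids and labels, so default DataLoader collation yields batches without an attention_mask key (the SFTTrainer change later in this diff handles that case). A small editor-added sketch with a stand-in dataset, not the real SFTDataset:

import torch
from torch.utils.data import DataLoader, Dataset


class ToySFT(Dataset):
    # Stand-in for SFTDataset: attention_mask may be None, as in the ChatGLM branch above.

    def __init__(self, with_mask: bool):
        self.input_ids = torch.zeros(4, 8, dtype=torch.long)
        self.labels = torch.zeros(4, 8, dtype=torch.long)
        self.attention_mask = torch.ones(4, 8, dtype=torch.long) if with_mask else None

    def __len__(self):
        return self.input_ids.shape[0]

    def __getitem__(self, idx):
        if self.attention_mask is not None:
            return dict(input_ids=self.input_ids[idx], labels=self.labels[idx],
                        attention_mask=self.attention_mask[idx])
        return dict(input_ids=self.input_ids[idx], labels=self.labels[idx])


batch = next(iter(DataLoader(ToySFT(with_mask=False), batch_size=2)))
assert set(batch.keys()) == {"input_ids", "labels"}    # no attention_mask key for ChatGLM-style items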
@@ -137,14 +180,22 @@ class SupervisedDataset(Dataset):
        ]

        logger.info("Tokenizing inputs... This may take some time...")
        self.input_ids, self.labels, self.attention_mask = \
            _preprocess(sources, targets, tokenizer, max_length)
        if isinstance(tokenizer, ChatGLMTokenizer):
            self.input_ids, self.labels, self.attention_mask = \
                _preprocess_chatglm(sources, targets, tokenizer, max_length)
        else:
            self.input_ids, self.labels, self.attention_mask = \
                _preprocess(sources, targets, tokenizer, max_length)

    def __len__(self):
        length = self.input_ids.shape[0]
        return length

    def __getitem__(self, idx):
        return dict(input_ids=self.input_ids[idx],
                    labels=self.labels[idx],
                    attention_mask=self.attention_mask[idx])
        if self.attention_mask is not None:
            return dict(input_ids=self.input_ids[idx],
                        labels=self.labels[idx],
                        attention_mask=self.attention_mask[idx])
        else:
            return dict(input_ids=self.input_ids[idx],
                        labels=self.labels[idx])
|
|||
from .chatglm_actor import ChatGLMActor
|
||||
|
||||
__all__ = ['ChatGLMActor']
|
|
@@ -0,0 +1,34 @@
from typing import Optional

import torch
from .configuration_chatglm import ChatGLMConfig
from .modeling_chatglm import ChatGLMForConditionalGeneration

from ..base import Actor


class ChatGLMActor(Actor):
    """
    ChatGLM Actor model.

    Args:
        pretrained (str): Pretrained model name or path.
        config (ChatGLMConfig): Model config.
        checkpoint (bool): Enable gradient checkpointing.

    LoRA is not supported for now.
    """

    def __init__(self,
                 pretrained: str = None,
                 config: Optional[ChatGLMConfig] = None,
                 checkpoint: bool = False) -> None:
        if pretrained is not None:
            model = ChatGLMForConditionalGeneration.from_pretrained(pretrained)
        elif config is not None:
            model = ChatGLMForConditionalGeneration(config)
        else:
            model = ChatGLMForConditionalGeneration(ChatGLMConfig())
        if checkpoint:
            model.gradient_checkpointing_enable()
        super().__init__(model, lora_rank=0, lora_train_bias='none')
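As an editor-added usage sketch (not part of the diff), and assuming the package layout added here (coati/models/chatglm/): the actor can be built either from a ChatGLMConfig or from a pretrained checkpoint. The reduced num_layers below only keeps the example light and is not a recommended setting; even so, the default hidden and vocab sizes still allocate a sizeable model.

from coati.models.chatglm import ChatGLMActor
from coati.models.chatglm.configuration_chatglm import ChatGLMConfig

# randomly initialised model from a config (no checkpoint download)
config = ChatGLMConfig(num_layers=2)
actor = ChatGLMActor(config=config)

# with the weights available locally or on the Hub, the pretrained path used elsewhere
# in this PR would be:
# actor = ChatGLMActor(pretrained="THUDM/chatglm-6b", checkpoint=True)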
@ -0,0 +1,446 @@
|
|||
"""
|
||||
This code is copied from https://huggingface.co/THUDM/chatglm-6b/blob/main/tokenization_chatglm.py
|
||||
"""
|
||||
"""Tokenization classes for ChatGLM."""
|
||||
from typing import List, Optional, Union
|
||||
import os
|
||||
|
||||
from transformers.tokenization_utils import PreTrainedTokenizer
|
||||
from transformers.utils import logging, PaddingStrategy
|
||||
from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
|
||||
from typing import Dict
|
||||
import sentencepiece as spm
|
||||
import numpy as np
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
|
||||
"THUDM/chatglm-6b": 2048,
|
||||
}
|
||||
|
||||
|
||||
class TextTokenizer:
|
||||
def __init__(self, model_path):
|
||||
self.sp = spm.SentencePieceProcessor()
|
||||
self.sp.Load(model_path)
|
||||
self.num_tokens = self.sp.vocab_size()
|
||||
|
||||
def encode(self, text):
|
||||
return self.sp.EncodeAsIds(text)
|
||||
|
||||
def decode(self, ids: List[int]):
|
||||
return self.sp.DecodeIds(ids)
|
||||
|
||||
def tokenize(self, text):
|
||||
return self.sp.EncodeAsPieces(text)
|
||||
|
||||
def convert_tokens_to_string(self, tokens):
|
||||
return self.sp.DecodePieces(tokens)
|
||||
|
||||
def convert_tokens_to_ids(self, tokens):
|
||||
return [self.sp.PieceToId(token) for token in tokens]
|
||||
|
||||
def convert_token_to_id(self, token):
|
||||
return self.sp.PieceToId(token)
|
||||
|
||||
def convert_id_to_token(self, idx):
|
||||
return self.sp.IdToPiece(idx)
|
||||
|
||||
def __len__(self):
|
||||
return self.num_tokens
|
||||
|
||||
|
||||
class SPTokenizer:
|
||||
def __init__(
|
||||
self,
|
||||
vocab_file,
|
||||
num_image_tokens=20000,
|
||||
max_blank_length=80,
|
||||
byte_fallback=True,
|
||||
):
|
||||
assert vocab_file is not None
|
||||
self.vocab_file = vocab_file
|
||||
self.num_image_tokens = num_image_tokens
|
||||
self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "<unused_0>", "<sop>", "<eop>", "<ENC>", "<dBLOCK>"]
|
||||
self.max_blank_length = max_blank_length
|
||||
self.byte_fallback = byte_fallback
|
||||
self.text_tokenizer = TextTokenizer(vocab_file)
|
||||
|
||||
def _get_text_tokenizer(self):
|
||||
return self.text_tokenizer
|
||||
|
||||
@staticmethod
|
||||
def get_blank_token(length: int):
|
||||
assert length >= 2
|
||||
return f"<|blank_{length}|>"
|
||||
|
||||
@staticmethod
|
||||
def get_tab_token():
|
||||
return f"<|tab|>"
|
||||
|
||||
@property
|
||||
def num_text_tokens(self):
|
||||
return self.text_tokenizer.num_tokens
|
||||
|
||||
@property
|
||||
def num_tokens(self):
|
||||
return self.num_image_tokens + self.num_text_tokens
|
||||
|
||||
@staticmethod
|
||||
def _encode_whitespaces(text: str, max_len: int = 80):
|
||||
text = text.replace("\t", SPTokenizer.get_tab_token())
|
||||
for i in range(max_len, 1, -1):
|
||||
text = text.replace(" " * i, SPTokenizer.get_blank_token(i))
|
||||
return text
|
||||
|
||||
def _preprocess(self, text: str, linebreak=True, whitespaces=True):
|
||||
if linebreak:
|
||||
text = text.replace("\n", "<n>")
|
||||
if whitespaces:
|
||||
text = self._encode_whitespaces(text, max_len=self.max_blank_length)
|
||||
return text
|
||||
|
||||
def encode(
|
||||
self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
|
||||
) -> List[int]:
|
||||
"""
|
||||
@param text: Text to encode.
|
||||
@param linebreak: Whether to encode newline (\n) in text.
|
||||
@param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
|
||||
@param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
|
||||
@param add_dummy_prefix: Whether to add dummy blank space in the beginning.
|
||||
"""
|
||||
text = self._preprocess(text, linebreak, whitespaces)
|
||||
if not add_dummy_prefix:
|
||||
text = "<n>" + text
|
||||
tmp = self._get_text_tokenizer().encode(text)
|
||||
tokens = [x + self.num_image_tokens for x in tmp]
|
||||
return tokens if add_dummy_prefix else tokens[2:]
|
||||
|
||||
def postprocess(self, text):
|
||||
text = text.replace("<n>", "\n")
|
||||
text = text.replace(SPTokenizer.get_tab_token(), "\t")
|
||||
for i in range(2, self.max_blank_length + 1):
|
||||
text = text.replace(self.get_blank_token(i), " " * i)
|
||||
return text
|
||||
|
||||
def decode(self, text_ids: List[int]) -> str:
|
||||
ids = [int(_id) - self.num_image_tokens for _id in text_ids]
|
||||
ids = [_id for _id in ids if _id >= 0]
|
||||
text = self._get_text_tokenizer().decode(ids)
|
||||
text = self.postprocess(text)
|
||||
return text
|
||||
|
||||
def decode_tokens(self, tokens: List[str]) -> str:
|
||||
text = self._get_text_tokenizer().convert_tokens_to_string(tokens)
|
||||
text = self.postprocess(text)
|
||||
return text
|
||||
|
||||
def tokenize(
|
||||
self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
|
||||
) -> List[str]:
|
||||
"""
|
||||
@param text: Text to encode.
|
||||
@param linebreak: Whether to encode newline (\n) in text.
|
||||
@param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
|
||||
@param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
|
||||
@param add_dummy_prefix: Whether to add dummy blank space in the beginning.
|
||||
"""
|
||||
text = self._preprocess(text, linebreak, whitespaces)
|
||||
if not add_dummy_prefix:
|
||||
text = "<n>" + text
|
||||
tokens = self._get_text_tokenizer().tokenize(text)
|
||||
return tokens if add_dummy_prefix else tokens[2:]
|
||||
|
||||
def __getitem__(self, x: Union[int, str]):
|
||||
if isinstance(x, int):
|
||||
if x < self.num_image_tokens:
|
||||
return "<image_{}>".format(x)
|
||||
else:
|
||||
return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
|
||||
elif isinstance(x, str):
|
||||
if x.startswith("<image_") and x.endswith(">") and x[7:-1].isdigit():
|
||||
return int(x[7:-1])
|
||||
else:
|
||||
return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
|
||||
else:
|
||||
raise ValueError("The key should be str or int.")
|
||||
|
||||
|
||||
class ChatGLMTokenizer(PreTrainedTokenizer):
|
||||
"""
|
||||
Construct a ChatGLM tokenizer. Based on byte-level Byte-Pair-Encoding.
|
||||
|
||||
Args:
|
||||
vocab_file (`str`):
|
||||
Path to the vocabulary file.
|
||||
"""
|
||||
|
||||
vocab_files_names = {"vocab_file": "ice_text.model"}
|
||||
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
|
||||
model_input_names = ["input_ids", "attention_mask", "position_ids"]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vocab_file,
|
||||
do_lower_case=False,
|
||||
remove_space=False,
|
||||
bos_token='<sop>',
|
||||
eos_token='<eop>',
|
||||
end_token='</s>',
|
||||
mask_token='[MASK]',
|
||||
gmask_token='[gMASK]',
|
||||
padding_side="left",
|
||||
pad_token="<pad>",
|
||||
unk_token="<unk>",
|
||||
num_image_tokens=20000,
|
||||
**kwargs
|
||||
) -> None:
|
||||
super().__init__(
|
||||
do_lower_case=do_lower_case,
|
||||
remove_space=remove_space,
|
||||
padding_side=padding_side,
|
||||
bos_token=bos_token,
|
||||
eos_token=eos_token,
|
||||
end_token=end_token,
|
||||
mask_token=mask_token,
|
||||
gmask_token=gmask_token,
|
||||
pad_token=pad_token,
|
||||
unk_token=unk_token,
|
||||
num_image_tokens=num_image_tokens,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
self.do_lower_case = do_lower_case
|
||||
self.remove_space = remove_space
|
||||
self.vocab_file = vocab_file
|
||||
|
||||
self.bos_token = bos_token
|
||||
self.eos_token = eos_token
|
||||
self.end_token = end_token
|
||||
self.mask_token = mask_token
|
||||
self.gmask_token = gmask_token
|
||||
|
||||
self.sp_tokenizer = SPTokenizer(vocab_file, num_image_tokens=num_image_tokens)
|
||||
|
||||
""" Initialisation """
|
||||
|
||||
@property
|
||||
def gmask_token_id(self) -> Optional[int]:
|
||||
if self.gmask_token is None:
|
||||
return None
|
||||
return self.convert_tokens_to_ids(self.gmask_token)
|
||||
|
||||
@property
|
||||
def end_token_id(self) -> Optional[int]:
|
||||
"""
|
||||
`Optional[int]`: Id of the end of context token in the vocabulary. Returns `None` if the token has not been
|
||||
set.
|
||||
"""
|
||||
if self.end_token is None:
|
||||
return None
|
||||
return self.convert_tokens_to_ids(self.end_token)
|
||||
|
||||
@property
|
||||
def vocab_size(self):
|
||||
""" Returns vocab size """
|
||||
return self.sp_tokenizer.num_tokens
|
||||
|
||||
def get_vocab(self):
|
||||
""" Returns vocab as a dict """
|
||||
vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
|
||||
vocab.update(self.added_tokens_encoder)
|
||||
return vocab
|
||||
|
||||
def preprocess_text(self, inputs):
|
||||
if self.remove_space:
|
||||
outputs = " ".join(inputs.strip().split())
|
||||
else:
|
||||
outputs = inputs
|
||||
|
||||
if self.do_lower_case:
|
||||
outputs = outputs.lower()
|
||||
|
||||
return outputs
|
||||
|
||||
def _tokenize(self, text, **kwargs):
|
||||
""" Returns a tokenized string. """
|
||||
text = self.preprocess_text(text)
|
||||
|
||||
seq = self.sp_tokenizer.tokenize(text)
|
||||
|
||||
return seq
|
||||
|
||||
def convert_tokens_to_string(self, tokens: List[str]) -> str:
|
||||
return self.sp_tokenizer.decode_tokens(tokens)
|
||||
|
||||
def _decode(
|
||||
self,
|
||||
token_ids: Union[int, List[int]],
|
||||
**kwargs
|
||||
) -> str:
|
||||
if isinstance(token_ids, int):
|
||||
token_ids = [token_ids]
|
||||
if len(token_ids) == 0:
|
||||
return ""
|
||||
if self.pad_token_id in token_ids: # remove pad
|
||||
token_ids = list(filter((self.pad_token_id).__ne__, token_ids))
|
||||
return super()._decode(token_ids, **kwargs)
|
||||
|
||||
def _convert_token_to_id(self, token):
|
||||
""" Converts a token (str) in an id using the vocab. """
|
||||
return self.sp_tokenizer[token]
|
||||
|
||||
def _convert_id_to_token(self, index):
|
||||
"""Converts an index (integer) in a token (str) using the vocab."""
|
||||
return self.sp_tokenizer[index]
|
||||
|
||||
def save_vocabulary(self, save_directory, filename_prefix=None):
|
||||
"""
|
||||
Save the vocabulary and special tokens file to a directory.
|
||||
|
||||
Args:
|
||||
save_directory (`str`):
|
||||
The directory in which to save the vocabulary.
|
||||
filename_prefix (`str`, *optional*):
|
||||
An optional prefix to add to the named of the saved files.
|
||||
|
||||
Returns:
|
||||
`Tuple(str)`: Paths to the files saved.
|
||||
"""
|
||||
if os.path.isdir(save_directory):
|
||||
vocab_file = os.path.join(
|
||||
save_directory, self.vocab_files_names["vocab_file"]
|
||||
)
|
||||
else:
|
||||
vocab_file = save_directory
|
||||
|
||||
with open(self.vocab_file, 'rb') as fin:
|
||||
proto_str = fin.read()
|
||||
|
||||
with open(vocab_file, "wb") as writer:
|
||||
writer.write(proto_str)
|
||||
|
||||
return (vocab_file,)
|
||||
|
||||
def build_inputs_with_special_tokens(
|
||||
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
||||
) -> List[int]:
|
||||
"""
|
||||
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
||||
adding special tokens. A BERT sequence has the following format:
|
||||
|
||||
- single sequence: `[CLS] X [SEP]`
|
||||
- pair of sequences: `[CLS] A [SEP] B [SEP]`
|
||||
|
||||
Args:
|
||||
token_ids_0 (`List[int]`):
|
||||
List of IDs to which the special tokens will be added.
|
||||
token_ids_1 (`List[int]`, *optional*):
|
||||
Optional second list of IDs for sequence pairs.
|
||||
|
||||
Returns:
|
||||
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
||||
"""
|
||||
gmask_id = self.sp_tokenizer[self.gmask_token]
|
||||
eos_id = self.sp_tokenizer[self.eos_token]
|
||||
token_ids_0 = token_ids_0 + [gmask_id, self.sp_tokenizer[self.bos_token]]
|
||||
if token_ids_1 is not None:
|
||||
token_ids_0 = token_ids_0 + token_ids_1
|
||||
return token_ids_0
|
||||
|
||||
def _pad(
|
||||
self,
|
||||
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
|
||||
max_length: Optional[int] = None,
|
||||
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
|
||||
pad_to_multiple_of: Optional[int] = None,
|
||||
return_attention_mask: Optional[bool] = None,
|
||||
) -> dict:
|
||||
"""
|
||||
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
|
||||
|
||||
Args:
|
||||
encoded_inputs:
|
||||
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
|
||||
max_length: maximum length of the returned list and optionally padding length (see below).
|
||||
Will truncate by taking into account the special tokens.
|
||||
padding_strategy: PaddingStrategy to use for padding.
|
||||
|
||||
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
|
||||
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
|
||||
- PaddingStrategy.DO_NOT_PAD: Do not pad
|
||||
The tokenizer padding sides are defined in self.padding_side:
|
||||
|
||||
- 'left': pads on the left of the sequences
|
||||
- 'right': pads on the right of the sequences
|
||||
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
|
||||
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
|
||||
`>= 7.5` (Volta).
|
||||
return_attention_mask:
|
||||
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
|
||||
"""
|
||||
# Load from model defaults
|
||||
bos_token_id = self.sp_tokenizer[self.bos_token]
|
||||
mask_token_id = self.sp_tokenizer[self.mask_token]
|
||||
gmask_token_id = self.sp_tokenizer[self.gmask_token]
|
||||
assert self.padding_side == "left"
|
||||
|
||||
required_input = encoded_inputs[self.model_input_names[0]]
|
||||
seq_length = len(required_input)
|
||||
|
||||
if padding_strategy == PaddingStrategy.LONGEST:
|
||||
max_length = len(required_input)
|
||||
|
||||
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
|
||||
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
|
||||
|
||||
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
|
||||
|
||||
# Initialize attention mask if not present.
|
||||
if max_length is not None:
|
||||
if "attention_mask" not in encoded_inputs:
|
||||
if bos_token_id in required_input:
|
||||
context_length = required_input.index(bos_token_id)
|
||||
else:
|
||||
context_length = seq_length
|
||||
attention_mask = np.ones((1, seq_length, seq_length))
|
||||
attention_mask = np.tril(attention_mask)
|
||||
attention_mask[:, :, :context_length] = 1
|
||||
attention_mask = np.bool_(attention_mask < 0.5)
|
||||
encoded_inputs["attention_mask"] = attention_mask
|
||||
|
||||
if "position_ids" not in encoded_inputs:
|
||||
if bos_token_id in required_input:
|
||||
context_length = required_input.index(bos_token_id)
|
||||
else:
|
||||
context_length = seq_length
|
||||
position_ids = np.arange(seq_length, dtype=np.int64)
|
||||
mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id
|
||||
if mask_token in required_input:
|
||||
mask_position = required_input.index(mask_token)
|
||||
position_ids[context_length:] = mask_position
|
||||
block_position_ids = np.concatenate(
|
||||
[np.zeros(context_length, dtype=np.int64),
|
||||
np.arange(1, seq_length - context_length + 1, dtype=np.int64)])
|
||||
encoded_inputs["position_ids"] = np.stack([position_ids, block_position_ids], axis=0)
|
||||
|
||||
if needs_to_be_padded:
|
||||
difference = max_length - len(required_input)
|
||||
|
||||
if "attention_mask" in encoded_inputs:
|
||||
encoded_inputs["attention_mask"] = np.pad(encoded_inputs["attention_mask"],
|
||||
pad_width=[(0, 0), (difference, 0), (difference, 0)],
|
||||
mode='constant', constant_values=True)
|
||||
if "token_type_ids" in encoded_inputs:
|
||||
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
|
||||
"token_type_ids"
|
||||
]
|
||||
if "special_tokens_mask" in encoded_inputs:
|
||||
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
|
||||
if "position_ids" in encoded_inputs:
|
||||
encoded_inputs["position_ids"] = np.pad(encoded_inputs["position_ids"],
|
||||
pad_width=[(0, 0), (difference, 0)])
|
||||
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
|
||||
|
||||
return encoded_inputs
|
|
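The `_pad` override above is where ChatGLM's non-standard inputs are produced: a full (seq, seq) boolean attention mask that is causal only after the `<sop>` token, and two stacked channels of position ids (absolute positions frozen at the mask slot, plus block offsets), both later left-padded. Below is an editor-added, standalone numpy sketch of that construction for one unpadded sequence, with made-up token ids and the simplification that only [gMASK] is considered; it is not the tokenizer's API.

import numpy as np


def chatglm_mask_and_positions(input_ids, bos_token_id, gmask_token_id):
    # Mirrors the attention-mask / position-id construction in the _pad override above.
    seq_length = len(input_ids)
    context_length = input_ids.index(bos_token_id) if bos_token_id in input_ids else seq_length

    # causal (lower-triangular) mask, but the prompt before <sop> stays fully visible;
    # True marks positions that must NOT be attended to
    attention_mask = np.tril(np.ones((1, seq_length, seq_length)))
    attention_mask[:, :, :context_length] = 1
    attention_mask = np.bool_(attention_mask < 0.5)

    # channel 0: absolute positions, frozen at the [gMASK] slot once generation starts
    position_ids = np.arange(seq_length, dtype=np.int64)
    if gmask_token_id in input_ids:
        position_ids[context_length:] = input_ids.index(gmask_token_id)
    # channel 1: 0 for the prompt, then 1, 2, ... inside the generated block
    block_position_ids = np.concatenate(
        [np.zeros(context_length, dtype=np.int64),
         np.arange(1, seq_length - context_length + 1, dtype=np.int64)])
    return attention_mask, np.stack([position_ids, block_position_ids], axis=0)


# made-up ids: prompt [11, 12], [gMASK]=130001, <sop>/BOS=130004, two generated tokens
mask, pos = chatglm_mask_and_positions([11, 12, 130001, 130004, 21, 22],
                                       bos_token_id=130004, gmask_token_id=130001)
assert mask.shape == (1, 6, 6) and pos.shape == (2, 6)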
@ -0,0 +1,107 @@
|
|||
"""
|
||||
This code is copied from https://huggingface.co/THUDM/chatglm-6b/resolve/main/configuration_chatglm.py
|
||||
"""
|
||||
|
||||
""" ChatGLM model configuration """
|
||||
|
||||
from transformers.configuration_utils import PretrainedConfig
|
||||
from transformers.utils import logging
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
class ChatGLMConfig(PretrainedConfig):
|
||||
r"""
|
||||
This is the configuration class to store the configuration of a [`~ChatGLMModel`].
|
||||
It is used to instantiate an ChatGLM model according to the specified arguments, defining the model
|
||||
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
|
||||
the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture.
|
||||
|
||||
Configuration objects inherit from [`PretrainedConfig`] and can be used
|
||||
to control the model outputs. Read the documentation from [`PretrainedConfig`]
|
||||
for more information.
|
||||
|
||||
|
||||
Args:
|
||||
vocab_size (`int`, *optional*, defaults to 150528):
|
||||
Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be represented by the
|
||||
`inputs_ids` passed when calling [`~ChatGLMModel`] or
|
||||
[`~TFChatGLMModel`].
|
||||
hidden_size (`int`, *optional*, defaults to 4096):
|
||||
Dimension of the encoder layers and the pooler layer.
|
||||
num_hidden_layers (`int`, *optional*, defaults to 28):
|
||||
Number of hidden layers in the Transformer encoder.
|
||||
num_attention_heads (`int`, *optional*, defaults to 32):
|
||||
Number of attention heads for each attention layer in the Transformer encoder.
|
||||
inner_hidden_size (`int`, *optional*, defaults to 16384):
|
||||
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
|
||||
max_sequence_length (`int`, *optional*, defaults to 512):
|
||||
The maximum sequence length that this model might ever be used with.
|
||||
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
|
||||
layernorm_epsilon (`float`, *optional*, defaults to 1e-5):
|
||||
The epsilon used by the layer normalization layers.
|
||||
use_cache (`bool`, *optional*, defaults to `True`):
|
||||
Whether the model should return the last key/values attentions (not used by all models).
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> from configuration_chatglm import ChatGLMConfig
|
||||
>>> from modeling_chatglm import ChatGLMModel
|
||||
|
||||
>>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration
|
||||
>>> configuration = ChatGLMConfig()
|
||||
|
||||
>>> # Initializing a model from the THUDM/ChatGLM-6B style configuration
|
||||
>>> model = ChatGLMModel(configuration)
|
||||
|
||||
>>> # Accessing the model configuration
|
||||
>>> configuration = model.config
|
||||
```
|
||||
"""
|
||||
model_type = "chatglm"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vocab_size=130528,
|
||||
hidden_size=4096,
|
||||
num_layers=28,
|
||||
num_attention_heads=32,
|
||||
layernorm_epsilon=1e-5,
|
||||
use_cache=True,
|
||||
bos_token_id=130004,
|
||||
eos_token_id=130005,
|
||||
mask_token_id=130000,
|
||||
gmask_token_id=130001,
|
||||
pad_token_id=3,
|
||||
max_sequence_length=2048,
|
||||
inner_hidden_size=16384,
|
||||
position_encoding_2d=True,
|
||||
quantization_bit=0,
|
||||
pre_seq_len=None,
|
||||
prefix_projection=False,
|
||||
**kwargs
|
||||
):
|
||||
self.num_layers = num_layers
|
||||
self.vocab_size = vocab_size
|
||||
self.hidden_size = hidden_size
|
||||
self.num_attention_heads = num_attention_heads
|
||||
self.max_sequence_length = max_sequence_length
|
||||
self.layernorm_epsilon = layernorm_epsilon
|
||||
self.inner_hidden_size = inner_hidden_size
|
||||
self.use_cache = use_cache
|
||||
self.bos_token_id = bos_token_id
|
||||
self.eos_token_id = eos_token_id
|
||||
self.pad_token_id = pad_token_id
|
||||
self.mask_token_id = mask_token_id
|
||||
self.gmask_token_id = gmask_token_id
|
||||
self.position_encoding_2d = position_encoding_2d
|
||||
self.quantization_bit = quantization_bit
|
||||
self.pre_seq_len = pre_seq_len
|
||||
self.prefix_projection = prefix_projection
|
||||
|
||||
super().__init__(
|
||||
pad_token_id=pad_token_id,
|
||||
bos_token_id=bos_token_id,
|
||||
eos_token_id=eos_token_id,
|
||||
**kwargs
|
||||
)
|
File diff suppressed because it is too large
@@ -52,9 +52,13 @@ class SFTTrainer(SLTrainer):
        for batch_id, batch in enumerate(self.train_dataloader):

            batch = to_device(batch, torch.cuda.current_device())
            outputs = self.model(batch["input_ids"],
                                 attention_mask=batch["attention_mask"],
                                 labels=batch["labels"])
            if "attention_mask" in batch:
                outputs = self.model(batch["input_ids"],
                                     attention_mask=batch["attention_mask"],
                                     labels=batch["labels"])
            else:
                outputs = self.model(batch["input_ids"],
                                     labels=batch["labels"])

            loss = outputs.loss
            loss = loss / self.accumulation_steps
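The `loss / self.accumulation_steps` line above is standard gradient accumulation: each micro-batch loss is scaled by 1/N so that gradients summed over N micro-batches match those of a single batch N times larger. A generic, editor-added PyTorch sketch of the pattern with toy data (not ColossalAI-specific):

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accumulation_steps = 4

data = [(torch.randn(2, 4), torch.randn(2, 1)) for _ in range(8)]
for step, (x, y) in enumerate(data):
    loss = torch.nn.functional.mse_loss(model(x), y)
    # scale so that the summed gradients average over the micro-batches
    (loss / accumulation_steps).backward()
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()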
@ -16,10 +16,9 @@
|
|||
"chat": {
|
||||
"GPT": [
|
||||
"language organization",
|
||||
"relevance",
|
||||
"naturalness",
|
||||
"engagingness",
|
||||
"reasonableness"
|
||||
"fidelity"
|
||||
],
|
||||
"Metrics": [
|
||||
"Distinct"
|
||||
|
@ -27,7 +26,6 @@
|
|||
},
|
||||
"classification": {
|
||||
"GPT": [
|
||||
"language organization",
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
|
@ -40,7 +38,6 @@
|
|||
},
|
||||
"closed_qa": {
|
||||
"GPT": [
|
||||
"language organization",
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
|
@ -53,7 +50,6 @@
|
|||
},
|
||||
"extraction": {
|
||||
"GPT": [
|
||||
"language organization",
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
|
@ -74,7 +70,20 @@
|
|||
"BLEU",
|
||||
"ROUGE",
|
||||
"BERTScore"
|
||||
]
|
||||
]
|
||||
},
|
||||
"logical_reasoning": {
|
||||
"GPT": [
|
||||
"correctness",
|
||||
"relevance",
|
||||
"reasonableness"
|
||||
],
|
||||
"Metrics": [
|
||||
"BLEU",
|
||||
"ROUGE",
|
||||
"BERTScore",
|
||||
"CHRF"
|
||||
]
|
||||
},
|
||||
"open_qa": {
|
||||
"GPT": [
|
||||
|
@ -117,11 +126,79 @@
|
|||
"conciseness"
|
||||
],
|
||||
"Metrics": [
|
||||
"BLEU",
|
||||
"ROUGE",
|
||||
"BERTScore",
|
||||
"CHRF"
|
||||
]
|
||||
]
|
||||
},
|
||||
"Finance": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
]
|
||||
},
|
||||
"Law": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
]
|
||||
},
|
||||
"Education": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
]
|
||||
},
|
||||
"Medical": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
]
|
||||
},
|
||||
"STEM": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
]
|
||||
},
|
||||
"SocialScience": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
]
|
||||
},
|
||||
"Humanity": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
]
|
||||
},
|
||||
"Other": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
]
|
||||
},
|
||||
"ethics": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
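The evaluation config files in this part of the diff map each category (chat, classification, logical_reasoning, the domain categories, and so on) to the GPT-rated dimensions and automatic metrics used for it. A small editor-added sketch of how such a per-category mapping might be consumed is given below; the file name and helper functions are hypothetical, and only the {"GPT": [...], "Metrics": [...]} entry shape comes from the diff.

import json
from typing import Dict, List, Tuple


def load_metric_plan(path: str) -> Dict[str, dict]:
    # Read a JSON file whose category entries look like {"GPT": [...], "Metrics": [...]}
    with open(path, encoding="utf-8") as f:
        return json.load(f)


def metrics_for(categories: Dict[str, dict], category: str) -> Tuple[List[str], List[str]]:
    entry = categories.get(category, {})
    return entry.get("GPT", []), entry.get("Metrics", [])


# hypothetical usage; "config.json" stands in for one of the files edited above
# categories = load_metric_plan("config.json")
# gpt_dims, auto_metrics = metrics_for(categories, "logical_reasoning")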
|
@ -26,10 +26,9 @@
|
|||
"chat": {
|
||||
"GPT": [
|
||||
"language organization",
|
||||
"relevance",
|
||||
"naturalness",
|
||||
"engagingness",
|
||||
"reasonableness"
|
||||
"fidelity"
|
||||
],
|
||||
"Metrics": [
|
||||
"Distinct"
|
||||
|
@ -45,7 +44,6 @@
|
|||
},
|
||||
"classification": {
|
||||
"GPT": [
|
||||
"language organization",
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
|
@ -63,7 +61,6 @@
|
|||
},
|
||||
"closed_qa": {
|
||||
"GPT": [
|
||||
"language organization",
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
|
@ -81,7 +78,6 @@
|
|||
},
|
||||
"extraction": {
|
||||
"GPT": [
|
||||
"language organization",
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
|
@ -114,6 +110,21 @@
|
|||
"data2text-informativeness"
|
||||
]
|
||||
},
|
||||
"logical_reasoning": {
|
||||
"GPT": [
|
||||
"correctness",
|
||||
"relevance",
|
||||
"reasonableness"
|
||||
],
|
||||
"Metrics": [
|
||||
"BLEU",
|
||||
"ROUGE",
|
||||
"BERTScore",
|
||||
"CHRF"
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
},
|
||||
"open_qa": {
|
||||
"GPT": [
|
||||
"language organization",
|
||||
|
@ -176,12 +187,96 @@
|
|||
"CHRF"
|
||||
],
|
||||
"UniEval": [
|
||||
"summarization-coherence",
|
||||
"summarization-consistency",
|
||||
"summarization-fluency",
|
||||
"summarization-relevance",
|
||||
"data2text-naturalness",
|
||||
"data2text-informativeness"
|
||||
]
|
||||
},
|
||||
"Finance": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
},
|
||||
"Law": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
},
|
||||
"Education": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
},
|
||||
"Medical": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
},
|
||||
"STEM": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
},
|
||||
"SocialScience": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
},
|
||||
"Humanity": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
},
|
||||
"Other": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
},
|
||||
"ethics": {
|
||||
"GPT": [
|
||||
"relevance",
|
||||
"correctness"
|
||||
],
|
||||
"Metrics": [
|
||||
],
|
||||
"UniEval": [
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,14 +26,16 @@
|
|||
"relevance": "切题(1-5):答案内容是否切题,不答非所问,并且严格遵照题目要求。",
|
||||
"naturalness": "自然(1-5):答案是否自然,并且符合问题给定的身份。",
|
||||
"engagingness": "参与感(1-5):答案是否对前面的对话内容做出了恰当的反应,是否理解对话的语境和背景。",
|
||||
"reasonableness": "合理性(1-5):答案是否能够与前面的对话内容形成逻辑上的衔接,是否符合常理,能否在这个上下文中合理存在。"
|
||||
"reasonableness": "合理性(1-5):答案是否能够与前面的对话内容形成逻辑上的衔接,是否符合常理,能否在这个上下文中合理存在。",
|
||||
"fidelity": "保真度(1-5):答案是否能够严格遵守角色的设定回答给定的请求。"
|
||||
},
|
||||
"CoT": {
|
||||
"language organization": "1. 阅读答案,并检查是否有语法错误、用词不当或其他显著的错误。\n2. 检查答案是否具有逻辑性,能够按照合理的顺序传达信息并且能够自圆其说。\n3. 确定答案是否与问题或主题相关,并且能够传达清晰的信息。\n4. 检查答案是否连贯,是否使用适当的转换和过渡来保持句子和段落之间的连贯性。\n5. 检查答案是否具有明确的结构和组织方式,使得读者可以轻松理解信息的层次和结构。\n6. 根据以上因素综合评估答案的语言组织,并给出一个1到5的分数,其中5表示语言组织非常好,而1表示语言组织非常差。\n\n语言组织:",
|
||||
"relevance": "1. 阅读题目,确定题目所问的问题是什么,以及需要回答哪些方面的问题。\n2. 阅读答案,确认答案是否直接回答了题目所问的问题。\n3. 检查答案是否严格遵照了题目的要求,包括答题方式、答题长度、答题格式等等。\n4. 根据以上因素综合评估答案的切题程度,并给出一个1到5的分数,其中5表示答案非常切题,而1表示答案完全没有切题。\n\n切题:",
|
||||
"naturalness": "1. 阅读题目,确定题目提供的身份信息。\n2. 检查答案内容是否符合题目给定的身份。\n3. 根据以上因素,对该回答的自然性进行打分,分数从1到5,其中1表示不自然,5表示非常自然,并符合问题给定的身份。\n\n自然:",
|
||||
"engagingness": "1. 阅读题目,确定对话的语境和背景。\n2. 检查答案是否充分理解对话的语境和背景,能否自然地融入到对话中而不显得突兀。\n3. 根据以上因素,对该回答的参与感进行打分,分数从1到5,其中1表示没有参与感,5表示非常有参与感,并且恰当地理解了对话的语境和背景。\n\n参与感:",
|
||||
"reasonableness": "1. 阅读题目,确定对话的主题以及问题期望的回答方向。\n2. 判断答案是否能够与前面的对话内容形成逻辑上的衔接,是否符合常理,能否在这个上下文中合理存在。\n3. 根据以上因素,对该回答的合理性进行打分,分数从1到5,其中1表示不合理,5表示非常合理,并且能够与前面的对话内容形成逻辑上的衔接,并符合常理。\n\n合理性:"
|
||||
"reasonableness": "1. 阅读题目,确定对话的主题以及问题期望的回答方向。\n2. 判断答案是否能够与前面的对话内容形成逻辑上的衔接,是否符合常理,能否在这个上下文中合理存在。\n3. 根据以上因素,对该回答的合理性进行打分,分数从1到5,其中1表示不合理,5表示非常合理,并且能够与前面的对话内容形成逻辑上的衔接,并符合常理。\n\n合理性:",
|
||||
"fidelity": "1. 仔细阅读问题,了解角色在问题中的设定和表现,包括职业、背景、观点、性格等方面。\n阅读题目的请求,确认回答请求时需要注意的细节。\n3. 对比提供的回答与该角色的设定,评估回答是否能够严格遵守角色的设定。\n4. 结合以上评估结果给出保真度的评分,范围从1到5分,其中1分表示回答与角色设定完全不符,5分表示回答完全符合角色设定且满足给定请求。\n\n保真度:"
|
||||
},
|
||||
"prompt": "你是一个好助手。请你为下面的“补全对话”问题的答案打分。\n\n问题如下:\n\n{question}\n\n答案如下:\n\n{answer}\n\n评分的指标如下:\n\n{metric}\n\n请你遵照以下的评分步骤:\n\n{steps}"
|
||||
},
|
||||
|
|
|
@ -26,14 +26,16 @@
|
|||
"relevance": "Relevance (1-5): whether the content of the answer is relevant to the topic, does not answer the wrong question, and strictly follows the requirements of the topic.",
|
||||
"naturalness": "Naturalness (1-5): whether the answer is natural and fits the identity given by the question.",
|
||||
"engagingness": "Engagingness (1-5): whether the answer responds appropriately to the content of the preceding conversation and whether it understands the context and background of the conversation.",
|
||||
"reasonableness": "Reasonableness (1-5): Whether the answer can form a logical connection with the content of the previous dialogue, whether it is consistent with common sense, and whether it can reasonably exist in this context."
|
||||
"reasonableness": "Reasonableness (1-5): Whether the answer can form a logical connection with the content of the previous dialogue, whether it is consistent with common sense, and whether it can reasonably exist in this context.",
|
||||
"fidelity": "Fidelity (1-5): whether the answer is able to answer the given request in strict compliance with the role setting."
|
||||
},
|
||||
"CoT": {
|
||||
"language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:",
|
||||
"relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:",
|
||||
"naturalness": "1. Read the question and determine the identity information provided in the question.\n2. Check whether the content of the answer matches the identity given in the question.\n3. Based on the above factors, score the naturalness of the response on a scale from 1 to 5, where 1 means unnatural and 5 means very natural and in accordance with the identity given in the question.\n\nNaturalness:",
|
||||
"engagingness": "1. Read the questions to determine the context and background of the dialogue.\n2. Check that the answer fully understands the context and background of the conversation and that it fits naturally into the conversation without seeming abrupt.\n3. Based on the above factors, rate the response's engagement on a scale from 1 to 5, where 1 means not engaged and 5 means very engaged and appropriately understands the context and background of the conversation.\n\nEngagingness:",
|
||||
"reasonableness": "1. Read the question and determine the topic of the conversation and the direction the question expects the answer to go.\n2. Determine whether the answer can be logically connected to the preceding conversation, whether it makes common sense, and whether it can reasonably exist in this context.\n3. Based on the above factors, rate the reasonableness of the answer on a scale from 1 to 5, where 1 means unreasonable and 5 means very reasonable and able to form a logical connection with the preceding dialogue content and consistent with common sense.\n\nReasonableness:"
|
||||
"reasonableness": "1. Read the question and determine the topic of the conversation and the direction the question expects the answer to go.\n2. Determine whether the answer can be logically connected to the preceding conversation, whether it makes common sense, and whether it can reasonably exist in this context.\n3. Based on the above factors, rate the reasonableness of the answer on a scale from 1 to 5, where 1 means unreasonable and 5 means very reasonable and able to form a logical connection with the preceding dialogue content and consistent with common sense.\n\nReasonableness:",
|
||||
"fidelity": "1. Read the question carefully to understand how the character is set up and represented in the question, including aspects such as occupation, background, point of view, and personality.\n2. Read the question's request and confirm the details that need to be taken into account when answering the request.\n3. Compare the provided answer with the setting of the role and assess whether the answer can strictly adhere to the setting of the role.\n4. Combine the results of the above assessment to give a fidelity score ranging from 1 to 5, where a score of 1 means that the response does not match the persona at all, and a score of 5 means that the response fully complies with the persona and satisfies the given request.\n\nFidelity:"
|
||||
},
|
||||
"prompt": "You are a good assistant. Please rate the given answer to the \"chat\" question below.\n\nThe question is as follows:\n\n{question}\n\nThe answer is as follows:\n\n{answer}\n\nThe metric for evaluation is as follows:\n\n{metric}\n\nYou should follow the following evaluation steps:\n\n{steps}"
|
||||
},
|
||||
|
|
|
@@ -1,2 +1,3 @@
pandas>=1.4.1
sentencepiece
colossalai==0.3.1
@@ -9,13 +9,15 @@ from coati.models.bloom import BLOOMActor
from coati.models.gpt import GPTActor
from coati.models.llama import LlamaActor
from coati.models.opt import OPTActor
from coati.models.chatglm import ChatGLMActor
from coati.trainer import SFTTrainer
from coati.trainer.strategies import DDPStrategy, GeminiStrategy, LowLevelZeroStrategy
from datasets import load_dataset
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from transformers import AutoTokenizer, BloomTokenizerFast, LlamaTokenizer
from transformers import AutoTokenizer, BloomTokenizerFast, LlamaTokenizer, AutoModel
from coati.models.chatglm.chatglm_tokenizer import ChatGLMTokenizer
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.trainer import get_scheduler

@@ -58,6 +60,8 @@ def train(args):
        model = LlamaActor(pretrained=args.pretrain,
                           lora_rank=args.lora_rank,
                           checkpoint=args.grad_checkpoint)
    elif args.model == 'chatglm':
        model = ChatGLMActor(pretrained=args.pretrain)
    else:
        raise ValueError(f'Unsupported model "{args.model}"')

@@ -81,6 +85,9 @@ def train(args):
            "hf-internal-testing/llama-tokenizer" if args.tokenizer is None else args.tokenizer)
        tokenizer.eos_token = '<\s>'
        tokenizer.pad_token = tokenizer.unk_token
    elif args.model == 'chatglm':
        tokenizer = ChatGLMTokenizer.from_pretrained(
            "THUDM/chatglm-6b" if args.tokenizer is None else args.tokenizer, trust_remote_code=True)
    else:
        raise ValueError(f'Unsupported model "{args.model}"')

@@ -99,7 +106,6 @@ def train(args):
        optim = HybridAdam(model.parameters(), lr=args.lr, clipping_norm=1.0)
    else:
        optim = Adam(model.parameters(), lr=args.lr)

    logger = get_dist_logger()

    # configure dataset

@@ -185,7 +191,7 @@ if __name__ == '__main__':
    parser.add_argument('--strategy',
                        choices=['ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_zero2_cpu'],
                        default='colossalai_zero2')
    parser.add_argument('--model', choices=['gpt2', 'bloom', 'opt', 'llama'], default='bloom')
    parser.add_argument('--model', choices=['gpt2', 'bloom', 'opt', 'llama', 'chatglm'], default='bloom')
    parser.add_argument('--tokenizer', type=str, default=None)
    parser.add_argument('--pretrain', type=str, default=None)
    parser.add_argument('--dataset', type=str, default=None)
@@ -1 +1,2 @@
pytest
colossalai==0.3.1
@@ -2,7 +2,7 @@ transformers>=4.20.1
tqdm
datasets
loralib
colossalai>=0.2.4
colossalai==0.3.1
torch<2.0.0, >=1.12.1
langchain
tokenizers
@@ -11,7 +11,7 @@ from coati.dataset.sft_dataset import IGNORE_INDEX, SFTDataset, SupervisedDatase
from datasets import load_dataset
from transformers import AutoTokenizer, BloomTokenizerFast, LlamaTokenizer, PreTrainedTokenizer
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer

from coati.models.chatglm.chatglm_tokenizer import ChatGLMTokenizer
SFT_DATASET = [
    {
        "instruction":

@@ -80,6 +80,8 @@ def make_tokenizer(model: str):
    elif model == "llama":
        tokenizer = LlamaTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
        tokenizer.pad_token = tokenizer.unk_token
    elif model == "chatglm":
        tokenizer = ChatGLMTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
    else:
        raise ValueError(f"Unsupported model '{model}'")
    return tokenizer

@@ -93,13 +95,19 @@ def check_content(input_ids_stripped: torch.Tensor, tokenizer: PreTrainedTokeniz
    elif model == "llama":
        assert input_ids_stripped[0] == tokenizer.bos_token_id
        input_ids_stripped = input_ids_stripped[1:]

    elif model == "chatglm":
        assert input_ids_stripped[0] == tokenizer.bos_token_id
        assert input_ids_stripped[-1] == tokenizer.eos_token_id
        input_ids_stripped = input_ids_stripped[1:-1]
    assert torch.all(input_ids_stripped != tokenizer.pad_token_id)
    assert torch.all(input_ids_stripped != tokenizer.bos_token_id)
    assert torch.all(input_ids_stripped != tokenizer.eos_token_id)
    assert input_ids_stripped != tokenizer.sep_token_id
    assert input_ids_stripped != tokenizer.cls_token_id
    assert input_ids_stripped != tokenizer.mask_token_id
    if model == "chatglm":
        assert torch.all(input_ids_stripped != tokenizer.mask_token_id)
    else:
        assert input_ids_stripped != tokenizer.mask_token_id


@pytest.mark.parametrize("model", ["gpt2", "bloom", "opt", "llama"])

@@ -190,7 +198,8 @@ def test_reward_dataset(model: str, dataset_path: str, subset: Optional[str], ma
    assert torch.all(r_mask)


@pytest.mark.parametrize("model", ["gpt2", "bloom", "opt", "llama"])
@pytest.mark.parametrize("model", ["gpt2", "bloom", "opt", "llama", "chatglm"])
@pytest.mark.parametrize("dataset_path", ["yizhongw/self_instruct", None])
@pytest.mark.parametrize("max_dataset_size", [2])
@pytest.mark.parametrize("max_length", [32, 1024])

@@ -211,6 +220,19 @@ def test_sft_dataset(model: str, dataset_path: Optional[str], max_dataset_size:
                             max_length=max_length)
    assert len(sft_dataset) == min(max_dataset_size, len(SFT_DATASET))

    if isinstance(tokenizer, ChatGLMTokenizer):
        for i in range(max_dataset_size):
            assert isinstance(sft_dataset[i], dict)
            assert list(sft_dataset[i].keys()) == ["input_ids", "labels"]
            input_ids = sft_dataset[i]["input_ids"]
            labels = sft_dataset[i]["labels"]
            assert input_ids.shape == labels.shape == torch.Size([max_length])

            ignore_mask = labels == IGNORE_INDEX
            assert input_ids.masked_select(torch.logical_not(ignore_mask))[0] == tokenizer.bos_token_id
            check_content(input_ids.masked_select(torch.logical_not(ignore_mask)), tokenizer, model)
        return

    for i in range(max_dataset_size):
        assert isinstance(sft_dataset[i], dict)
        assert list(sft_dataset[i].keys()) == ["input_ids", "labels", "attention_mask"]

@@ -238,4 +260,7 @@ if __name__ == "__main__":
                        max_datasets_size=8,
                        max_length=256)

    test_prompt_dataset(model="opt", max_datasets_size=2, max_length=128)
    test_prompt_dataset(model="opt",
                        max_datasets_size=2,
                        max_length=128)
|
|||
from coati.models.generation import generate
|
||||
from coati.models.gpt import GPTRM, GPTActor, GPTCritic
|
||||
from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM
|
||||
from coati.models.chatglm import ChatGLMActor
|
||||
from coati.models.lora import LoraLinear, convert_to_lora_module
|
||||
from coati.models.loss import GPTLMLoss, LogExpLoss, LogSigLoss, PolicyLoss, ValueLoss
|
||||
from coati.models.opt import OPTRM, OPTActor, OPTCritic
|
||||
from coati.models.utils import calc_action_log_probs, compute_reward, masked_mean
|
||||
|
||||
from coati.models.chatglm.chatglm_tokenizer import ChatGLMTokenizer
|
||||
|
||||
@pytest.mark.parametrize("batch_size", [4])
|
||||
@pytest.mark.parametrize("seq_len", [32])
|
||||
|
@ -24,8 +25,10 @@ from coati.models.utils import calc_action_log_probs, compute_reward, masked_mea
|
|||
lambda: GPTActor(),
|
||||
# HACK: skip llama due to long execution time
|
||||
# lambda: LlamaActor(),
|
||||
lambda: OPTActor()
|
||||
])
|
||||
lambda: OPTActor(),
|
||||
# lambda: ChatGLMActor(),
|
||||
])
|
||||
|
||||
@pytest.mark.parametrize("generate_kwargs", [{
|
||||
"max_length": 64,
|
||||
"use_cache": True,
|
||||
|
@ -115,11 +118,13 @@ def test_lora(lora_rank: int, num_dim: int, num_layers: int):
|
|||
lambda: (GPTActor(), GPTCritic(), GPTRM()),
|
||||
# HACK: skip llama due to long execution time
|
||||
# lambda: (LlamaActor(), LlamaCritic(), LlamaRM()),
|
||||
lambda: (OPTActor(), OPTCritic(), OPTRM()),
|
||||
])
|
||||
lambda: (OPTActor(), OPTCritic(), OPTRM()),
|
||||
lambda: (ChatGLMActor(), None, None),
|
||||
])
|
||||
@torch.no_grad()
|
||||
def test_models(models_maker: Callable[[], Tuple[Actor, Critic, RewardModel]], batch_size: int, seq_len: int):
|
||||
|
||||
def test_models(models_maker: Callable[[], Tuple[Actor, Critic, RewardModel]],
|
||||
batch_size: int,
|
||||
seq_len: int):
|
||||
actor_input = {
|
||||
"input_ids": torch.randint(0, 100, (batch_size, seq_len)),
|
||||
"attention_mask": torch.randint(0, 2, (batch_size, seq_len))
|
||||
|
@ -135,20 +140,30 @@ def test_models(models_maker: Callable[[], Tuple[Actor, Critic, RewardModel]], b
|
|||
}
|
||||
|
||||
actor, critic, rm = models_maker()
|
||||
if isinstance(actor, ChatGLMActor):
actor = actor.float()
tokenizer = ChatGLMTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
chatglm_special_token = torch.tensor([tokenizer.gmask_token_id, tokenizer.bos_token_id]).repeat(batch_size, 1)
actor_input = {
"input_ids": torch.cat((torch.randint(0, 100, (batch_size, seq_len // 2)), chatglm_special_token, torch.randint(0, 100, (batch_size, seq_len // 2 - 2))), dim=1),
"attention_mask": torch.randint(0, 2, (batch_size, 1, seq_len, seq_len))
}
|
||||
assert isinstance(actor, Actor)
|
||||
base_actor_model = get_base_model(actor)
|
||||
assert isinstance(critic, Critic)
|
||||
base_critic_model = get_base_model(critic)
|
||||
assert isinstance(rm, RewardModel)
|
||||
base_rm_model = get_base_model(rm)
|
||||
|
||||
actor_output = actor(**actor_input)
|
||||
critic_output = critic(**critic_input)
|
||||
rm_output = rm(**rm_input)
|
||||
|
||||
assert actor_output.logits.shape[:2] == (batch_size, seq_len)
|
||||
assert critic_output.shape == (batch_size,)
|
||||
assert rm_output.shape == (batch_size,)
|
||||
|
||||
if critic:
|
||||
assert isinstance(critic, Critic)
|
||||
base_critic_model = get_base_model(critic)
|
||||
critic_output = critic(**critic_input)
|
||||
assert critic_output.shape == (batch_size, )
|
||||
|
||||
if rm:
|
||||
assert isinstance(rm, RewardModel)
|
||||
base_rm_model = get_base_model(rm)
|
||||
rm_output = rm(**rm_input)
|
||||
assert rm_output.shape == (batch_size, )
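The ChatGLM branch above needs a different dummy input from the decoder-only models: the sequence layout is [context tokens, gMASK, BOS, generation tokens] and the attention mask is 4-D with shape (batch, 1, seq, seq). A hedged standalone sketch of building such an input (the token ids passed in here are placeholders, not the real ChatGLM vocabulary ids):

import torch

def build_chatglm_test_input(batch_size: int, seq_len: int, gmask_token_id: int, bos_token_id: int) -> dict:
    # layout: [context | gMASK | BOS | generation], so context + generation fill seq_len - 2 positions
    special = torch.tensor([gmask_token_id, bos_token_id]).repeat(batch_size, 1)
    context = torch.randint(0, 100, (batch_size, seq_len // 2))
    generation = torch.randint(0, 100, (batch_size, seq_len // 2 - 2))
    return {
        "input_ids": torch.cat((context, special, generation), dim=1),             # (B, seq_len)
        "attention_mask": torch.randint(0, 2, (batch_size, 1, seq_len, seq_len)),  # (B, 1, S, S)
    }

dummy = build_chatglm_test_input(batch_size=4, seq_len=32, gmask_token_id=1, bos_token_id=2)
assert dummy["input_ids"].shape == (4, 32)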
|
||||
|
||||
|
||||
@pytest.mark.parametrize("batch_size", [16])
|
||||
|
@ -203,4 +218,4 @@ if __name__ == "__main__":
|
|||
|
||||
test_models(models_maker=lambda: (BLOOMActor(), BLOOMCritic(), BLOOMRM()), batch_size=8, seq_len=128)
|
||||
|
||||
test_loss(batch_size=8, seq_len=128, num_labels=100)
|
||||
test_loss(batch_size=8, seq_len=128, num_labels=100)
|
|
@ -144,7 +144,7 @@ def size_value_converting_pass(gm: torch.fx.GraphModule, device_mesh: DeviceMesh
|
|||
|
||||
# DeviceMesh information instructs the scaling of the size value
|
||||
device_mesh_info = {}
|
||||
for dim, dim_size in enumerate(device_mesh.mesh_shape):
|
||||
for dim, dim_size in enumerate(device_mesh.shape):
|
||||
device_mesh_info[dim] = dim_size
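The loop simply records the size of every device-mesh dimension, now read from device_mesh.shape instead of the removed mesh_shape attribute. An illustrative round trip with a hypothetical 2 x 4 mesh shape:

device_mesh_shape = (2, 4)  # hypothetical mesh: 2 nodes x 4 devices per node
device_mesh_info = {dim: dim_size for dim, dim_size in enumerate(device_mesh_shape)}
assert device_mesh_info == {0: 2, 1: 4}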
|
||||
|
||||
def _extract_target_dim(node):
|
||||
|
|
|
@ -1,13 +1,11 @@
|
|||
import gc
|
||||
import logging
|
||||
import os
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
from typing import Callable, Iterator, List, Optional, Tuple, Union
|
||||
from typing import Callable, Iterator, List, Optional, Tuple
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from torch import Tensor
|
||||
from torch.optim import Optimizer
|
||||
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
|
||||
from torch.utils.data import DataLoader
|
||||
|
@ -16,7 +14,6 @@ from colossalai.checkpoint_io import CheckpointIndexFile, CheckpointIO, GeneralC
|
|||
from colossalai.checkpoint_io.utils import (
|
||||
get_model_base_filenames,
|
||||
get_optimizer_base_filenames,
|
||||
get_shard_filename,
|
||||
load_shard_state_dict,
|
||||
save_config_file,
|
||||
save_state_dict,
|
||||
|
@ -25,8 +22,7 @@ from colossalai.checkpoint_io.utils import (
|
|||
from colossalai.cluster import DistCoordinator
|
||||
from colossalai.interface import ModelWrapper, OptimizerWrapper
|
||||
from colossalai.utils import get_current_device
|
||||
from colossalai.zero import GeminiDDP, zero_model_wrapper, zero_optim_wrapper
|
||||
from colossalai.zero.gemini import ZeroOptimizer
|
||||
from colossalai.zero import GeminiDDP, GeminiOptimizer
|
||||
from colossalai.zero.gemini.memory_tracer import MemStats
|
||||
|
||||
from .dp_plugin_base import DPPluginBase
|
||||
|
@ -134,11 +130,7 @@ class GeminiCheckpointIO(GeneralCheckpointIO):
|
|||
As there is communication when getting state dict, this must be called on all processes.
|
||||
"""
|
||||
|
||||
# If optimizer is wrapped, unwrap it.
|
||||
if isinstance(optimizer, OptimizerWrapper):
|
||||
optimizer = optimizer.unwrap()
|
||||
|
||||
assert isinstance(optimizer, ZeroOptimizer)
|
||||
assert isinstance(optimizer, GeminiOptimizer)
|
||||
|
||||
if os.path.isfile(checkpoint):
|
||||
logging.error(f"Provided path ({checkpoint}) should be a directory, not a file")
|
||||
|
@ -185,11 +177,7 @@ class GeminiCheckpointIO(GeneralCheckpointIO):
|
|||
if not os.path.isfile(checkpoint_index_file):
|
||||
logging.error(f"Provided path ({checkpoint_index_file}) should be a file")
|
||||
|
||||
# If optimizer is wrapped, unwrap it.
|
||||
if isinstance(optimizer, OptimizerWrapper):
|
||||
optimizer = optimizer.unwrap()
|
||||
|
||||
assert isinstance(optimizer, ZeroOptimizer)
|
||||
assert isinstance(optimizer, GeminiOptimizer)
|
||||
|
||||
# Read checkpoint index file.
|
||||
ckpt_index_file = CheckpointIndexFile.from_file(checkpoint_index_file)
|
||||
|
@ -222,47 +210,6 @@ class GeminiCheckpointIO(GeneralCheckpointIO):
|
|||
super().save_lr_scheduler(lr_scheduler, checkpoint)
|
||||
|
||||
|
||||
class GeminiModel(ModelWrapper):
|
||||
|
||||
def __init__(self, module: nn.Module, gemini_config: dict, verbose: bool = False) -> None:
|
||||
super().__init__(module)
|
||||
self.module = zero_model_wrapper(module, zero_stage=3, gemini_config=gemini_config, verbose=verbose)
|
||||
|
||||
def unwrap(self):
|
||||
# as save/load state dict is coupled with the GeminiDDP, we only return GeminiDDP model
|
||||
return self.module
|
||||
|
||||
|
||||
class GeminiOptimizer(OptimizerWrapper):
|
||||
|
||||
def __init__(self,
|
||||
module: GeminiDDP,
|
||||
optimizer: Optimizer,
|
||||
zero_optim_config: dict,
|
||||
optim_kwargs: dict,
|
||||
verbose: bool = False) -> None:
|
||||
optimizer = zero_optim_wrapper(module,
|
||||
optimizer,
|
||||
optim_config=zero_optim_config,
|
||||
**optim_kwargs,
|
||||
verbose=verbose)
|
||||
super().__init__(optimizer)
|
||||
|
||||
def backward(self, loss: Tensor, *args, **kwargs):
|
||||
self.optim.backward(loss)
|
||||
|
||||
def clip_grad_by_norm(self,
|
||||
max_norm: Union[float, int],
|
||||
norm_type: Union[float, int] = 2,
|
||||
error_if_nonfinite: bool = False,
|
||||
*args,
|
||||
**kwargs) -> Tensor:
|
||||
warnings.warn(f'Gemini controls grad clipping by itself, so you should not use clip_grad_by_norm')
|
||||
|
||||
def clip_grad_by_value(self, clip_value: float, *args, **kwargs) -> None:
|
||||
raise NotImplementedError('Gemini does not support clip_grad_by_value')
|
||||
|
||||
|
||||
class GeminiPlugin(DPPluginBase):
|
||||
"""
|
||||
Plugin for Gemini.
|
||||
|
@ -279,8 +226,20 @@ class GeminiPlugin(DPPluginBase):
|
|||
>>> model, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)
|
||||
|
||||
Args:
|
||||
device (torch.device): device to place the model.
|
||||
placement_policy (str, optional): "cpu", "cuda", "auto". Defaults to "cpu".
|
||||
chunk_config_dict (dict, optional): chunk configuration dictionary.
|
||||
chunk_init_device (torch.device, optional): device to initialize the chunk.
|
||||
placement_policy (str, optional): "static" and "auto". Defaults to "static".
|
||||
shard_param_frac (float, optional): fraction of parameters to be sharded. Only for "static" placement.
|
||||
If `shard_param_frac` is 1.0, it's equal to zero-3. If `shard_param_frac` is 0.0, it's equal to zero-2. Defaults to 1.0.
|
||||
offload_optim_frac (float, optional): fraction of optimizer states to be offloaded. Only for "static" placement.
|
||||
If `shard_param_frac` is 1.0 and `offload_optim_frac` is 0.0, it's equal to old "cuda" placement. Defaults to 0.0.
|
||||
offload_param_frac (float, optional): fraction of parameters to be offloaded. Only for "static" placement.
|
||||
For efficiency, this argument is useful only when `shard_param_frac` is 1.0 and `offload_optim_frac` is 1.0.
|
||||
If `shard_param_frac` is 1.0, `offload_optim_frac` is 1.0 and `offload_param_frac` is 1.0, it's equal to old "cpu" placement.
|
||||
When using static placement, we recommend users to tune `shard_param_frac` first and then `offload_optim_frac`.
|
||||
Defaults to 0.0.
|
||||
warmup_non_model_data_ratio (float, optional): ratio of expected non-model data memory during warmup. Only for "auto" placement. Defaults to 0.8.
|
||||
steady_cuda_cap_ratio (float, optional): ratio of allowed cuda capacity for model data during steady state. Only for "auto" placement. Defaults to 0.9.
|
||||
precision (str, optional): precision. Support 'fp16' and 'bf16'. Defaults to 'fp16'.
|
||||
pin_memory (bool, optional): use pin memory on CPU. Defaults to False.
|
||||
force_outputs_fp32 (bool, optional): force outputs are fp32. Defaults to False.
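Taken together, the new arguments replace the old single "cpu"/"cuda" placement switch with fractional controls. A hedged usage sketch follows: the keyword names mirror the docstring above, the values are illustrative only, and it assumes a distributed environment has already been launched (e.g. via colossalai.launch_from_torch):

import torch
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin

plugin = GeminiPlugin(
    placement_policy="static",
    shard_param_frac=1.0,       # shard all parameters, zero-3 style
    offload_optim_frac=0.5,     # keep half of the optimizer states on CPU
    offload_param_frac=0.0,
    precision="fp16",
    pin_memory=True,
)
booster = Booster(plugin=plugin)
# model, optimizer, criterion and dataloader are then wrapped as shown in the
# usage example at the top of this docstring.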
|
||||
|
@ -312,8 +271,14 @@ class GeminiPlugin(DPPluginBase):
|
|||
|
||||
def __init__(
|
||||
self,
|
||||
device: Optional[torch.device] = None,
|
||||
placement_policy: str = "cpu",
|
||||
chunk_config_dict: Optional[dict] = None,
|
||||
chunk_init_device: Optional[torch.device] = None,
|
||||
placement_policy: str = "static",
|
||||
shard_param_frac: float = 1.0, # only for static placement
|
||||
offload_optim_frac: float = 0.0, # only for static placement
|
||||
offload_param_frac: float = 0.0, # only for static placement
|
||||
warmup_non_model_data_ratio: float = 0.8, # only for auto placement
|
||||
steady_cuda_cap_ratio: float = 0.9, # only for auto placement
|
||||
precision: str = "fp16",
|
||||
pin_memory: bool = False,
|
||||
force_outputs_fp32: bool = False,
|
||||
|
@ -337,8 +302,14 @@ class GeminiPlugin(DPPluginBase):
|
|||
super().__init__()
|
||||
assert precision in SUPPORTED_PRECISION, f'precision {precision} is not supported'
|
||||
self.gemini_config = dict(
|
||||
device=(device or get_current_device()),
|
||||
chunk_config_dict=chunk_config_dict,
|
||||
chunk_init_device=(chunk_init_device or get_current_device()),
|
||||
placement_policy=placement_policy,
|
||||
shard_param_frac=shard_param_frac,
|
||||
offload_optim_frac=offload_optim_frac,
|
||||
offload_param_frac=offload_param_frac,
|
||||
warmup_non_model_data_ratio=warmup_non_model_data_ratio,
|
||||
steady_cuda_cap_ratio=steady_cuda_cap_ratio,
|
||||
pin_memory=pin_memory,
|
||||
force_outputs_fp32=force_outputs_fp32,
|
||||
strict_ddp_mode=strict_ddp_mode,
|
||||
|
@ -395,12 +366,15 @@ class GeminiPlugin(DPPluginBase):
|
|||
# model = nn.SyncBatchNorm.convert_sync_batchnorm(model, None)
|
||||
|
||||
# wrap the model with Gemini
|
||||
model = GeminiModel(model, self.gemini_config, self.verbose)
|
||||
model = GeminiDDP(model, **self.gemini_config, verbose=self.verbose)
|
||||
|
||||
if optimizer is not None and \
|
||||
not isinstance(optimizer, OptimizerWrapper):
|
||||
optimizer = GeminiOptimizer(model.unwrap(), optimizer, self.zero_optim_config, self.optim_kwargs,
|
||||
self.verbose)
|
||||
optimizer = GeminiOptimizer(optimizer,
|
||||
model.unwrap(),
|
||||
**self.zero_optim_config,
|
||||
**self.optim_kwargs,
|
||||
verbose=self.verbose)
|
||||
|
||||
return model, optimizer, criterion, dataloader, lr_scheduler
|
||||
|
||||
|
|
|
@ -17,8 +17,13 @@ from colossalai.checkpoint_io import CheckpointIndexFile, CheckpointIO
|
|||
from colossalai.checkpoint_io.utils import (
|
||||
get_optimizer_base_filenames,
|
||||
get_shard_filename,
|
||||
load_param_groups_into_optimizer,
|
||||
load_shard_state_dict,
|
||||
load_states_into_optimizer,
|
||||
save_param_groups,
|
||||
save_state_dict,
|
||||
sharded_optimizer_loading_epilogue,
|
||||
unwrap_optimizer,
|
||||
)
|
||||
from colossalai.interface import ModelWrapper, OptimizerWrapper
|
||||
from colossalai.utils import get_current_device
|
||||
|
@ -126,19 +131,39 @@ class LowLevelZeroCheckpointIO(TorchDDPCheckpointIO):
|
|||
index_file_path (str): Path to the index file
|
||||
prefix (str): Not used.
|
||||
"""
|
||||
super().load_sharded_optimizer(optimizer, index_file_path, prefix)
|
||||
current_rank_state_dict = optimizer.optim.state_dict()['state']
|
||||
for param_idx, state in current_rank_state_dict.items():
|
||||
for k, v in state.items():
|
||||
if isinstance(v, torch.Tensor) and k != 'step':
|
||||
padding_size = (self.coordinator.world_size -
|
||||
v.numel() % self.coordinator.world_size) % self.coordinator.world_size
|
||||
with torch.no_grad():
|
||||
v = v.flatten()
|
||||
if padding_size > 0:
|
||||
v = torch.nn.functional.pad(v, [0, padding_size])
|
||||
v_list = v.split(v.numel() // self.coordinator.world_size)
|
||||
current_rank_state_dict[param_idx][k] = v_list[self.coordinator.rank].detach()
|
||||
# If optimizer is wrapped, unwrap it.
|
||||
if isinstance(optimizer, OptimizerWrapper):
|
||||
optimizer = unwrap_optimizer(optimizer)
|
||||
|
||||
# Read checkpoint index file.
|
||||
ckpt_index_file = CheckpointIndexFile.from_file(index_file_path)
|
||||
|
||||
# Load param_groups
|
||||
param_group_path = ckpt_index_file.get_param_group_filename()
|
||||
if param_group_path is None:
|
||||
raise RuntimeError(f'Invalid index file path {index_file_path} for an optimizer. \
|
||||
Lacking param group file under current directory.')
|
||||
id_map = load_param_groups_into_optimizer(optimizer, param_group_path)
|
||||
|
||||
checkpoint_files, _ = ckpt_index_file.get_checkpoint_filenames()
|
||||
|
||||
for shard_file in checkpoint_files:
|
||||
state_dict = load_shard_state_dict(Path(shard_file), use_safetensors=False)
|
||||
# shard state dict
|
||||
for param_idx, state in state_dict.items():
|
||||
for k, v in state.items():
|
||||
if isinstance(v, torch.Tensor) and k != 'step':
|
||||
padding_size = (self.coordinator.world_size -
|
||||
v.numel() % self.coordinator.world_size) % self.coordinator.world_size
|
||||
with torch.no_grad():
|
||||
v = v.flatten()
|
||||
if padding_size > 0:
|
||||
v = torch.nn.functional.pad(v, [0, padding_size])
|
||||
v_list = v.split(v.numel() // self.coordinator.world_size)
|
||||
state_dict[param_idx][k] = v_list[self.coordinator.rank].detach().clone()
|
||||
load_states_into_optimizer(optimizer, state_dict, id_map)
|
||||
|
||||
sharded_optimizer_loading_epilogue(optimizer)
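The inner loop keeps only the local slice of every flattened optimizer-state tensor: numel is padded up to a multiple of world_size, the tensor is split evenly, and this rank's chunk is kept. A standalone sketch of that arithmetic (not the checkpoint IO API, just the math):

import torch

def shard_flat_state(v: torch.Tensor, world_size: int, rank: int) -> torch.Tensor:
    # pad the flattened tensor to a multiple of world_size, then keep this rank's slice
    padding_size = (world_size - v.numel() % world_size) % world_size
    v = v.flatten()
    if padding_size > 0:
        v = torch.nn.functional.pad(v, [0, padding_size])
    v_list = v.split(v.numel() // world_size)
    return v_list[rank].detach().clone()

# a 10-element state tensor on 4 ranks is padded to 12 and split into slices of 3
shard = shard_flat_state(torch.arange(10.0), world_size=4, rank=1)
assert shard.tolist() == [3.0, 4.0, 5.0]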
|
||||
|
||||
|
||||
class LowLevelZeroModel(ModelWrapper):
|
||||
|
|
|
@ -79,8 +79,6 @@ class GeneralCheckpointIO(CheckpointIO):
|
|||
for shard_file in checkpoint_files:
|
||||
state_dict = load_shard_state_dict(Path(shard_file), use_safetensors=False)
|
||||
load_states_into_optimizer(optimizer, state_dict, id_map)
|
||||
del state_dict
|
||||
gc.collect()
|
||||
|
||||
sharded_optimizer_loading_epilogue(optimizer)
|
||||
|
||||
|
|
|
@ -514,7 +514,7 @@ def load_shard_state_dict(checkpoint_file: Path, use_safetensors: bool = False):
|
|||
f"Conversion from a {metadata['format']} safetensors archive to PyTorch is not implemented yet.")
|
||||
return safe_load_file(checkpoint_file)
|
||||
else:
|
||||
return torch.load(checkpoint_file)
|
||||
return torch.load(checkpoint_file, map_location=torch.device('cpu'))
|
||||
|
||||
|
||||
def load_state_dict_into_model(model: nn.Module,
|
||||
|
@ -574,7 +574,7 @@ def load_param_groups_into_optimizer(optimizer: Optimizer, param_group_path: str
|
|||
|
||||
# Load list of param_groups from given file path.
|
||||
# The params in saved_groups are in the form of integer indices.
|
||||
saved_groups = torch.load(param_group_path)
|
||||
saved_groups = torch.load(param_group_path, map_location=torch.device('cpu'))
|
||||
if not isinstance(saved_groups, List):
|
||||
raise ValueError(f'The param_groups saved at {param_group_path} is not of List type')
|
||||
|
||||
|
@ -730,7 +730,7 @@ def load_state_dict(checkpoint_file_path: Path):
|
|||
|
||||
else:
|
||||
# load with torch
|
||||
return torch.load(checkpoint_file_path)
|
||||
return torch.load(checkpoint_file_path, map_location=torch.device('cpu'))
|
||||
|
||||
|
||||
def add_prefix(weights_name: str, prefix: Optional[str] = None) -> str:
|
||||
|
|
|
@ -265,6 +265,10 @@ def launch_multi_processes(args: Config) -> None:
|
|||
# establish remote connection
|
||||
runner.connect(host_info_list=active_device_pool, workdir=curr_path, env=env)
|
||||
|
||||
# overwrite master addr when num_nodes > 1 and not specified
|
||||
if len(active_device_pool) > 1 and args.master_addr == "127.0.0.1":
|
||||
args.master_addr = active_device_pool.hostinfo_list[0].hostname
|
||||
|
||||
# execute distributed launching command
|
||||
for node_id, hostinfo in enumerate(active_device_pool):
|
||||
cmd = get_launch_command(master_addr=args.master_addr,
|
||||
|
|
|
@ -2,7 +2,13 @@ import warnings
|
|||
|
||||
HAS_MEM_EFF_ATTN = False
|
||||
try:
|
||||
from xformers.ops.fmha import memory_efficient_attention
|
||||
from xformers.ops.fmha import MemoryEfficientAttentionCutlassOp, memory_efficient_attention
|
||||
from xformers.ops.fmha.attn_bias import (
|
||||
BlockDiagonalCausalMask,
|
||||
BlockDiagonalMask,
|
||||
LowerTriangularMask,
|
||||
LowerTriangularMaskWithTensorBias,
|
||||
)
|
||||
HAS_MEM_EFF_ATTN = True
|
||||
except ImportError:
|
||||
warnings.warn('please install xformers from https://github.com/facebookresearch/xformers')
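Moving the attn_bias imports into the same try block keeps the whole optional dependency behind one flag. A hedged sketch of how downstream code can branch on such a flag, with a plain PyTorch fallback; the xformers call uses the public memory_efficient_attention entry point, but treat the fallback as illustrative only:

import warnings
import torch

HAS_MEM_EFF_ATTN = False
try:
    from xformers.ops.fmha import memory_efficient_attention
    HAS_MEM_EFF_ATTN = True
except ImportError:
    warnings.warn('xformers is not installed, falling back to naive attention')

def scaled_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    # q, k, v: (batch, seq, heads, head_dim), the layout xformers expects
    if HAS_MEM_EFF_ATTN:
        return memory_efficient_attention(q, k, v)
    q, k, v = (t.transpose(1, 2) for t in (q, k, v))   # -> (batch, heads, seq, head_dim)
    attn = torch.softmax(q @ k.transpose(-2, -1) / q.shape[-1] ** 0.5, dim=-1)
    return (attn @ v).transpose(1, 2)                   # back to (batch, seq, heads, head_dim)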
|
||||
|
@ -16,13 +22,6 @@ if HAS_MEM_EFF_ATTN:
|
|||
from typing import Optional
|
||||
|
||||
import torch
|
||||
from xformers.ops.fmha import MemoryEfficientAttentionCutlassOp
|
||||
from xformers.ops.fmha.attn_bias import (
|
||||
BlockDiagonalCausalMask,
|
||||
BlockDiagonalMask,
|
||||
LowerTriangularMask,
|
||||
LowerTriangularMaskWithTensorBias,
|
||||
)
|
||||
|
||||
from .utils import SeqLenInfo
|
||||
|
||||
|
|
|
@ -3,9 +3,15 @@ from typing import Optional
|
|||
import torch
|
||||
|
||||
from colossalai.tensor.colo_tensor import ColoTensor
|
||||
from colossalai.tensor.const import TensorType
|
||||
from colossalai.tensor.param_op_hook import ColoParamOpHookManager
|
||||
from colossalai.tensor.tensor_spec import ColoTensorSpec
|
||||
|
||||
from .colo_tensor import _convert_output
|
||||
|
||||
WHITE_LIST_FUNCS = {torch.Tensor.__getitem__}
|
||||
|
||||
|
||||
def is_no_hook_op(func) -> bool:
|
||||
return func.__name__.startswith('__') and func not in WHITE_LIST_FUNCS
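In other words, dunder ops bypass the parameter hooks unless they are whitelisted, and __getitem__ is whitelisted so indexing still triggers them. A quick illustrative check of the predicate, assuming the same whitelist:

import torch

WHITE_LIST_FUNCS = {torch.Tensor.__getitem__}

def is_no_hook_op(func) -> bool:
    return func.__name__.startswith('__') and func not in WHITE_LIST_FUNCS

assert is_no_hook_op(torch.Tensor.__repr__) is True      # dunder, not whitelisted: hooks skipped
assert is_no_hook_op(torch.Tensor.__getitem__) is False  # whitelisted: hooks still run
assert is_no_hook_op(torch.add) is False                 # ordinary op: hooks run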
|
||||
|
||||
|
||||
def filter_colo_parameters(*args, **kwargs):
|
||||
|
@ -41,53 +47,25 @@ class ColoParameter(ColoTensor, torch.nn.Parameter):
|
|||
|
||||
"""
|
||||
|
||||
def __new__(cls,
|
||||
data: Optional[torch.Tensor] = None,
|
||||
requires_grad: bool = True,
|
||||
spec: ColoTensorSpec = None) -> 'ColoParameter':
|
||||
def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad: bool = True) -> 'ColoParameter':
|
||||
if data is None:
|
||||
data = torch.empty(0)
|
||||
return torch.Tensor._make_subclass(cls, data, requires_grad)
|
||||
|
||||
def __init__(self,
|
||||
data: Optional[torch.Tensor] = None,
|
||||
requires_grad: bool = True,
|
||||
spec: ColoTensorSpec = None) -> None:
|
||||
ColoTensor.__init__(self, data, spec)
|
||||
self._type = TensorType.MODEL
|
||||
# a list contains modules sharing this ColoParameter with others.
|
||||
self._shared_param_modules = []
|
||||
|
||||
@property
|
||||
def shared_param_modules(self):
|
||||
return self._shared_param_modules
|
||||
|
||||
@staticmethod
|
||||
def from_torch_tensor(tensor: torch.Tensor,
|
||||
requires_grad: bool = True,
|
||||
spec: ColoTensorSpec = None) -> 'ColoParameter':
|
||||
tensor = tensor.as_subclass(ColoParameter)
|
||||
tensor.__init__(tensor, requires_grad=requires_grad, spec=spec)
|
||||
return tensor
|
||||
|
||||
def __repr__(self):
|
||||
return super(ColoParameter, self).__repr__()
|
||||
|
||||
@classmethod
|
||||
def __torch_function__(cls, func, types, args=..., kwargs=None):
|
||||
if ColoParamOpHookManager.has_hook():
|
||||
if not func.__name__.startswith('__'):
|
||||
if kwargs is None:
|
||||
kwargs = {}
|
||||
params = filter_colo_parameters(*args, **kwargs)
|
||||
if len(params) > 0:
|
||||
with torch._C.DisableTorchFunction():
|
||||
new_args = ColoParamOpHookManager.pre_op(params, *args, *kwargs.values())
|
||||
args, kwargs = replace_args(args, kwargs, new_args)
|
||||
ret = super().__torch_function__(func, types, args, kwargs)
|
||||
with torch._C.DisableTorchFunction():
|
||||
ret = ColoParamOpHookManager.post_op(params, ret)
|
||||
return ret
|
||||
if kwargs is None:
|
||||
kwargs = {}
|
||||
if ColoParamOpHookManager.has_hook() and not is_no_hook_op(func):
|
||||
params = filter_colo_parameters(*args, **kwargs)
|
||||
if len(params) > 0:
|
||||
with torch._C.DisableTorchFunction():
|
||||
new_args = ColoParamOpHookManager.pre_op(params, *args, *kwargs.values())
|
||||
args, kwargs = replace_args(args, kwargs, new_args)
|
||||
ret = super().__torch_function__(func, types, args, kwargs)
|
||||
with torch._C.DisableTorchFunction():
|
||||
ret = ColoParamOpHookManager.post_op(params, ret)
|
||||
return _convert_output(ret, func)
|
||||
return super().__torch_function__(func, types, args, kwargs)
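The simplified __torch_function__ above fires ColoParamOpHookManager.pre_op/post_op around every non-dunder op that touches a ColoParameter. A hedged sketch of plugging in a custom hook; the method names and the use_hooks context manager follow the ColoParamOpHook interface that GeminiZeROHook also implements, but check the current signatures before relying on them:

from colossalai.tensor.param_op_hook import ColoParamOpHook, ColoParamOpHookManager

class CountingHook(ColoParamOpHook):
    # counts how many parameter-touching ops run in the forward pass
    def __init__(self):
        super().__init__()
        self.num_pre_forward = 0

    def pre_forward(self, params) -> None:
        self.num_pre_forward += 1

    def post_forward(self, params) -> None:
        pass

    def pre_backward(self, params) -> None:
        pass

    def post_backward(self, params) -> None:
        pass

# hook = CountingHook()
# with ColoParamOpHookManager.use_hooks(hook):
#     output = model(inputs)   # every op involving a ColoParameter bumps the counter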
|
||||
|
||||
def __deepcopy__(self, memo):
|
||||
|
@ -96,9 +74,7 @@ class ColoParameter(ColoTensor, torch.nn.Parameter):
|
|||
else:
|
||||
with torch._C.DisableTorchFunction():
|
||||
data = self.data.clone()
|
||||
tensor = ColoParameter(data,
|
||||
self.requires_grad,
|
||||
spec=ColoTensorSpec(self.get_process_group(), self.dist_spec, self.compute_spec))
|
||||
tensor = ColoParameter(data, self.requires_grad)
|
||||
memo[id(self)] = tensor
|
||||
return tensor
|
||||
|
||||
|
|
|
@ -1,17 +1,14 @@
|
|||
import operator
|
||||
from copy import copy
|
||||
from functools import lru_cache, reduce
|
||||
from typing import Callable, Optional, Set
|
||||
from functools import lru_cache
|
||||
from typing import Callable, Set
|
||||
|
||||
import torch
|
||||
|
||||
from colossalai.tensor.dist_spec_mgr import DistSpecManager
|
||||
from colossalai.tensor.distspec import DistPlacementPattern, ReplicaSpec, _DistSpec
|
||||
from colossalai.tensor.process_group import ProcessGroup
|
||||
from colossalai.tensor.tensor_spec import ColoTensorSpec
|
||||
|
||||
from .const import TensorType
|
||||
from .op_wrapper import _COLOSSAL_OPS
|
||||
INPALCE_MAPPING = {
|
||||
torch.Tensor.add_: torch.Tensor.add,
|
||||
torch.Tensor.sub_: torch.Tensor.sub,
|
||||
torch.Tensor.mul_: torch.Tensor.mul,
|
||||
torch.Tensor.div_: torch.Tensor.div
|
||||
}
|
||||
|
||||
|
||||
@lru_cache(None)
|
||||
|
@ -25,61 +22,37 @@ def _get_my_nowrap_functions() -> Set[Callable]:
|
|||
}
|
||||
|
||||
|
||||
def _convert_output(output, colo_spec: ColoTensorSpec):
|
||||
if type(output) == torch.Tensor:
|
||||
return ColoTensor.from_torch_tensor(output, colo_spec)
|
||||
def _convert(output):
|
||||
if isinstance(output, torch.Tensor) and not isinstance(output, ColoTensor):
|
||||
output.__class__ = ColoTensor
|
||||
elif isinstance(output, (list, tuple)):
|
||||
return type(output)(_convert_output(o, colo_spec) for o in output)
|
||||
else:
|
||||
output = type(output)(_convert(o) for o in output)
|
||||
return output
|
||||
|
||||
|
||||
def _convert_output(output, func):
|
||||
if func in _get_my_nowrap_functions():
|
||||
return output
|
||||
|
||||
|
||||
def _get_spec_from_args(args, kwargs) -> ColoTensorSpec:
|
||||
for elem in args:
|
||||
if isinstance(elem, ColoTensor):
|
||||
pg = elem.get_process_group()
|
||||
dp = elem.dist_spec
|
||||
return ColoTensorSpec(pg, dp)
|
||||
elif isinstance(elem, (list, tuple)):
|
||||
spec = _get_spec_from_args(elem, {})
|
||||
if spec is not None:
|
||||
return spec
|
||||
for k, v in kwargs.items():
|
||||
if isinstance(v, ColoTensor):
|
||||
pg = v.get_process_group()
|
||||
dp = v.dist_spec
|
||||
return ColoTensorSpec(pg, dp)
|
||||
return None
|
||||
return _convert(output)
|
||||
|
||||
|
||||
class ColoTensor(torch.Tensor):
|
||||
""" Data Structure for Tensor in Colossal-AI. It is a subclass of torch.Tensor.
|
||||
|
||||
The Colotensor can be initialized with a PyTorch tensor in the following ways.
|
||||
|
||||
>>> pg = ProcessGroup()
|
||||
>>> colo_t1 = ColoTensor(torch.randn(2,3), spec = ColoTensorSpec(pg, ReplicaSpec()))
|
||||
>>> # The tensor passed in is a tensor after sharding but not a global tensor.
|
||||
>>> shard_spec = ShardSpec(process_group=ProcessGroup(tp=world_size),
|
||||
>>> dims=[0],
|
||||
>>> num_partitions=[world_size])
|
||||
>>> tensor_spec = ColoTensorSpec(pg, shard_spec)
|
||||
>>> colo_t2 = ColoTensor.from_torch_tensor(t_ref.clone(), tensor_spec)
|
||||
It is only used to trigger the torch function hook.
|
||||
|
||||
Args:
|
||||
data (torch.Tensor): a torch tensor used as the payload of the colotensor.
|
||||
spec (ColoTensorSpec, optional): the tensor spec of initialization. Defaults to ColoTensorSpec(ReplicaSpec()).
|
||||
"""
|
||||
torch_major = int(torch.__version__.split('.')[0])
|
||||
torch_minor = int(torch.__version__.split('.')[1])
|
||||
|
||||
def __new__(cls, data: torch.Tensor, spec: ColoTensorSpec) -> 'ColoTensor':
|
||||
def __new__(cls, data: torch.Tensor) -> 'ColoTensor':
|
||||
"""
|
||||
The signature of the __new__ has to be consistent with the torch.Tensor.
|
||||
|
||||
Args:
|
||||
data (torch.Tensor): a torch tensor used as the payload of the colotensor.
|
||||
spec (TensorSpec, optional): the tensor spec of initialization.
|
||||
|
||||
Returns:
|
||||
ColoTensor: a ColoTensor wrappers the data.
|
||||
|
@ -88,86 +61,6 @@ class ColoTensor(torch.Tensor):
|
|||
data = torch.empty(0)
|
||||
return torch.Tensor._make_subclass(cls, data, data.requires_grad)
|
||||
|
||||
def __init__(self, data: torch.Tensor, spec: Optional[ColoTensorSpec] = None) -> None:
|
||||
# If not set spec, use a DP process group and replicate dist spec
|
||||
if spec is None:
|
||||
self.has_initialized = False
|
||||
self.dist_spec = ReplicaSpec()
|
||||
self.compute_spec = None
|
||||
self.process_group = ProcessGroup()
|
||||
else:
|
||||
self.has_initialized = True
|
||||
self.dist_spec = spec.dist_attr
|
||||
self.compute_spec = spec.compute_attr
|
||||
if spec.pg is None:
|
||||
self.process_group = ProcessGroup()
|
||||
else:
|
||||
self.process_group = spec.pg
|
||||
|
||||
self._type = TensorType.NONMODEL
|
||||
|
||||
def has_compute_spec(self) -> bool:
|
||||
return self.compute_spec is not None
|
||||
|
||||
def is_model_data(self) -> bool:
|
||||
return self._type == TensorType.MODEL
|
||||
|
||||
def get_process_group(self) -> 'ProcessGroup':
|
||||
return self.process_group
|
||||
|
||||
def set_process_group(self, pg: ProcessGroup):
|
||||
"""set_process_group
|
||||
change the pg of the ColoTensor. Note that the valid use cases is limited.
|
||||
It works for the target pg is DP and TP only and current dist spec of the Tensor is Replica.
|
||||
|
||||
Args:
|
||||
pg (ProcessGroup): target pg
|
||||
|
||||
"""
|
||||
assert isinstance(pg, ProcessGroup), f"pg as type {type(pg)} is invalid"
|
||||
# if the new pg is the same as the old pg, just returns
|
||||
if self.process_group == pg:
|
||||
return
|
||||
assert self.process_group.tp_world_size() == 1 or self.process_group.dp_world_size() == 1, \
|
||||
"Can not set_process_group on a ColoTensor whose process_group is both tp > 1 and world group > 1"
|
||||
assert self.dist_spec.placement.value == 'r', \
|
||||
"Can not set_process_group on a ColoTensor whose dist spec is not Replica"
|
||||
|
||||
self.process_group = pg
|
||||
|
||||
def get_tp_world_size(self) -> int:
|
||||
return self.process_group.tp_world_size()
|
||||
|
||||
def get_dp_world_size(self) -> int:
|
||||
"""get_dp_world_size
|
||||
get the dp world size of the tensor.
|
||||
|
||||
Returns:
|
||||
int: dp world size
|
||||
"""
|
||||
return self.process_group.dp_world_size()
|
||||
|
||||
def set_dist_spec(self, dist_spec: _DistSpec):
|
||||
"""set_dist_spec
|
||||
set dist spec and change the payloads.
|
||||
|
||||
Args:
|
||||
dist_spec (_DistSpec): target dist spec.
|
||||
"""
|
||||
assert isinstance(dist_spec, _DistSpec)
|
||||
assert self.process_group is not None
|
||||
self._redistribute(dist_spec)
|
||||
|
||||
def set_tensor_spec(self, dist_spec, compute_spec):
|
||||
if dist_spec is not None:
|
||||
assert isinstance(dist_spec, _DistSpec), f"{type(dist_spec)}"
|
||||
self.set_dist_spec(dist_spec)
|
||||
if compute_spec is not None:
|
||||
self.compute_spec = compute_spec
|
||||
|
||||
def has_compute_pattern(self, compute_pattern):
|
||||
return self.compute_spec.compute_pattern == compute_pattern
|
||||
|
||||
@classmethod
|
||||
def __torch_function__(cls, func, types, args=(), kwargs=None):
|
||||
if kwargs is None:
|
||||
|
@ -175,9 +68,6 @@ class ColoTensor(torch.Tensor):
|
|||
|
||||
if not all(issubclass(cls, t) for t in types):
|
||||
return NotImplemented
|
||||
global _COLOSSAL_OPS
|
||||
if func in _COLOSSAL_OPS:
|
||||
func = _COLOSSAL_OPS[func]
|
||||
|
||||
if cls.torch_major > 1 or (cls.torch_major == 1 and cls.torch_minor >= 12):
|
||||
# in order to trigger pre-op hook in the forward of checkpoint module
|
||||
|
@ -189,94 +79,16 @@ class ColoTensor(torch.Tensor):
|
|||
tensor_kwargs = {k: torch.Tensor(v) if torch.is_tensor(v) else v for k, v in kwargs.items()}
|
||||
return backward_tensor.backward(**tensor_kwargs)
|
||||
|
||||
# replace the in-place function
|
||||
if func in INPALCE_MAPPING:
|
||||
func = INPALCE_MAPPING[func]
|
||||
# set the 'inplace' kwargs to False
|
||||
if 'inplace' in kwargs:
|
||||
kwargs['inplace'] = False
|
||||
|
||||
with torch._C.DisableTorchFunction():
|
||||
ret = func(*args, **kwargs)
|
||||
if func in _get_my_nowrap_functions():
|
||||
return ret
|
||||
else:
|
||||
colo_spec = _get_spec_from_args(args, kwargs)
|
||||
return _convert_output(ret, colo_spec)
|
||||
|
||||
def __repr__(self):
|
||||
output_list = [super(ColoTensor, self).__repr__()]
|
||||
output_list.append(str(self.process_group))
|
||||
output_list.append(str(self.dist_spec))
|
||||
if self.compute_spec is not None:
|
||||
output_list.append(str(self.compute_spec))
|
||||
return "\n".join(output_list)
|
||||
|
||||
def _redistribute(self, dist_spec: _DistSpec) -> None:
|
||||
"""_redistribute
|
||||
Note the function will not handle the logic of backward propagation!
|
||||
It is used during model tensor initializations as an internal function.
|
||||
|
||||
Args:
|
||||
dist_spec (_DistSpec): the target dist. spec.
|
||||
"""
|
||||
assert self.grad_fn is None, "Current tensor has grad_fn and it can't get converted"
|
||||
with DistSpecManager.no_grad():
|
||||
self.data = DistSpecManager.handle_trans_spec(self.data, self.dist_spec, dist_spec, self.process_group)
|
||||
self.dist_spec = dist_spec
|
||||
|
||||
def redistribute(self, dist_spec: _DistSpec, pg: Optional[ProcessGroup] = None) -> 'ColoTensor':
|
||||
"""redistribute
|
||||
Redistribute the tensor among processes. The rule is like this:
|
||||
|
||||
1. If the pg is None, then redistribute the tensor payload among the TP process group. Keep the
|
||||
DP process group not changed.
|
||||
|
||||
2. If the pg is not None and not equal to the current process group:
|
||||
First, convert the tensor as replicated among the TP process group.
|
||||
Second, reset the process group to the new pg.
|
||||
Third, convert the tensor (new replicated both among the tp process group) to the new dist_spec.
|
||||
|
||||
Args:
|
||||
dist_spec (_DistSpec): the new dist spec.
|
||||
pg (Optional[ProcessGroup], optional): the new process group . Defaults to None.
|
||||
|
||||
Returns:
|
||||
ColoTensor: a redistributed colotensor
|
||||
"""
|
||||
if pg is not None and pg != self.get_process_group():
|
||||
# if the pg is not equal, convert the current tensor to replicated
|
||||
handled = self.redistribute(ReplicaSpec())
|
||||
else:
|
||||
handled = self
|
||||
pg = self.process_group
|
||||
|
||||
ret = DistSpecManager.handle_trans_spec(handled, handled.dist_spec, dist_spec, pg)
|
||||
return ColoTensor.from_torch_tensor(ret, ColoTensorSpec(pg=pg, dist_attr=dist_spec))
|
||||
|
||||
def to_replicate_(self):
|
||||
"""to_replicate_
|
||||
|
||||
an inline member function, converting dist spec of the tensor to REPLICATE
|
||||
"""
|
||||
self._redistribute(dist_spec=ReplicaSpec())
|
||||
|
||||
def to_replicate(self) -> 'ColoTensor':
|
||||
"""to_replicate
|
||||
|
||||
converting dist spec of the tensor to ReplicaSpec()
|
||||
"""
|
||||
return self.redistribute(ReplicaSpec())
|
||||
|
||||
@staticmethod
|
||||
def from_torch_tensor(tensor: torch.Tensor, spec: Optional[ColoTensorSpec] = None) -> 'ColoTensor':
|
||||
"""from_torch_tensor
|
||||
|
||||
A static method builds a `ColoTensor` from a PyTorch Tensor.
|
||||
|
||||
Args:
|
||||
tensor (torch.Tensor): the pytorch tensor, which is a local tensor for this rank not a global tensor.
|
||||
spec (Optional[ColoTensorSpec], optional): tensor spec. Defaults to None.
|
||||
|
||||
Returns:
|
||||
ColoTensor: a ColoTensor
|
||||
"""
|
||||
tensor = tensor.as_subclass(ColoTensor)
|
||||
tensor.__init__(tensor, spec=spec)
|
||||
return tensor
|
||||
return _convert_output(ret, func)
|
||||
|
||||
def __deepcopy__(self, memo):
|
||||
if id(self) in memo:
|
||||
|
@ -284,60 +96,6 @@ class ColoTensor(torch.Tensor):
|
|||
else:
|
||||
with torch._C.DisableTorchFunction():
|
||||
data = self.data.clone()
|
||||
tensor = ColoTensor(data, spec=copy(ColoTensorSpec(self.process_group, self.dist_spec, self.compute_spec)))
|
||||
tensor = ColoTensor(data)
|
||||
memo[id(self)] = tensor
|
||||
return tensor
|
||||
|
||||
# override builtin functions which must use tensor in replicate placement #
|
||||
|
||||
def size_local(self, *args) -> torch.Size:
|
||||
with torch._C.DisableTorchFunction():
|
||||
return super().size(*args)
|
||||
|
||||
def size_global(self, *args) -> torch.Size:
|
||||
"""size_global
|
||||
|
||||
override the torch built-in size().
The returned shape is the global shape, i.e. the shape the tensor would have in replicate placement.
|
||||
|
||||
Returns:
|
||||
torch.Size: the global tensor shape
|
||||
"""
|
||||
if self.is_replicate():
|
||||
return self.size_local(*args)
|
||||
spec = self.dist_spec
|
||||
dims = spec.dims
|
||||
num_partitions = spec.num_partitions
|
||||
# import inspect
|
||||
# print(*['{:40}| {}:{}\n'.format(x.function, x.filename, x.lineno) for x in inspect.stack()])
|
||||
size_list = list(self.size_local())
|
||||
for dim, num_partition in zip(dims, num_partitions):
|
||||
size_list[dim] *= num_partition
|
||||
if args == ():
|
||||
return torch.Size(size_list)
|
||||
else:
|
||||
return size_list[args[0]]
|
||||
|
||||
def numel_global(self):
|
||||
"""Returns the number of elements in the tensor when it's replicated.
|
||||
"""
|
||||
return reduce(operator.mul, self.size_global(), 1)
|
||||
|
||||
# Some API for dist spec check
|
||||
|
||||
def is_replicate(self):
|
||||
return self.dist_spec.placement == DistPlacementPattern.REPLICATE \
|
||||
or (len(self.dist_spec.num_partitions) == 1
|
||||
and self.dist_spec.num_partitions[0] == 1) \
|
||||
or (self.process_group.tp_world_size() == 1)
|
||||
|
||||
def is_shard_1dcol(self):
|
||||
return self.dist_spec.placement == DistPlacementPattern.SHARD \
|
||||
and len(self.dist_spec.dims) == 1 and self.dist_spec.dims[0] == -1
|
||||
|
||||
def is_shard_1drow(self):
|
||||
return self.dist_spec.placement == DistPlacementPattern.SHARD \
|
||||
and len(self.dist_spec.dims) == 1 and self.dist_spec.dims[0] == 0
|
||||
|
||||
def is_sharded(self):
|
||||
return self.dist_spec.placement == DistPlacementPattern.SHARD
|
||||
|
|
|
@ -3,9 +3,7 @@ from contextlib import contextmanager
|
|||
from typing import Any, List, Tuple
|
||||
|
||||
import torch
|
||||
|
||||
from colossalai.tensor.colo_tensor import ColoTensor
|
||||
from colossalai.tensor.tensor_spec import ColoTensorSpec
|
||||
from torch.utils._pytree import TreeSpec, tree_flatten, tree_unflatten
|
||||
|
||||
|
||||
class ColoParamOpHook(ABC):
|
||||
|
@ -82,26 +80,18 @@ class ColoParamOpHookManager:
|
|||
@staticmethod
|
||||
def pre_op(params: List[torch.Tensor], *args: Any) -> list:
|
||||
ColoParamOpHookManager._trigger_pre_forward(params)
|
||||
grad_args, rear_args = _get_grad_args(*args)
|
||||
colo_info = _get_colo_tensors_info(*grad_args)
|
||||
rets = PreFwdPostBwd.apply(params, *grad_args)
|
||||
update_args = _update_colo_tensors(colo_info, *rets)
|
||||
if rear_args is None:
|
||||
return update_args
|
||||
else:
|
||||
arg_zero = (tuple(update_args),)
|
||||
return arg_zero + rear_args
|
||||
# the autograd function can only recognize torch.Tensor, so we have to flatten the input
# if one of the inputs requires grad, all the outputs will be treated as requiring grad
# and will have a grad fn even if the corresponding input does not require grad
# we have to extract the tensors requiring grad into a flat list and then merge them back
|
||||
grad_args, other_args, grad_flags, spec = _flatten_grad_args(args)
|
||||
new_grad_args = PreFwdPostBwd.apply(params, *grad_args)
|
||||
return _merge_args(new_grad_args, other_args, grad_flags, spec)
|
||||
|
||||
@staticmethod
|
||||
def post_op(params: List[torch.Tensor], arg: Any) -> Any:
|
||||
ColoParamOpHookManager._trigger_post_forward(params)
|
||||
colo_info = _get_colo_tensors_info(arg)
|
||||
ret = PostFwdPreBwd.apply(params, arg)
|
||||
res = _update_colo_tensors(colo_info, ret)
|
||||
if len(res) == 1:
|
||||
return res[0]
|
||||
else:
|
||||
return res
|
||||
return PostFwdPreBwd.apply(params, arg)
|
||||
|
||||
@staticmethod
|
||||
def has_hook() -> bool:
|
||||
|
@ -141,57 +131,24 @@ def _is_grad_tensor(obj) -> bool:
|
|||
return False
|
||||
|
||||
|
||||
def _has_grad_tensor(obj) -> bool:
|
||||
if isinstance(obj, tuple) or isinstance(obj, list):
|
||||
for x in obj:
|
||||
if _has_grad_tensor(x):
|
||||
return True
|
||||
return False
|
||||
elif isinstance(obj, dict):
|
||||
for x in obj.values():
|
||||
if _has_grad_tensor(x):
|
||||
return True
|
||||
return False
|
||||
else:
|
||||
return _is_grad_tensor(obj)
|
||||
|
||||
|
||||
def _get_grad_args(*args):
|
||||
# if there is no grad tensors, do nothing
|
||||
if not _has_grad_tensor(args):
|
||||
return args, None
|
||||
# returns the identical args if there is a grad tensor
|
||||
for obj in args:
|
||||
if _is_grad_tensor(obj):
|
||||
return args, None
|
||||
# otherwise, the first argument should be a tuple of grad tensors
|
||||
# if there is no grad tensor, the backward of PreFwdPostBwd can't be triggered
|
||||
arg_zero = args[0]
|
||||
if not isinstance(arg_zero, tuple):
|
||||
raise NotImplementedError("Some torch function is incompatible because of its complicated inputs.")
|
||||
check_grad_flag = False
|
||||
for obj in arg_zero:
|
||||
check_grad_flag |= _is_grad_tensor(obj)
|
||||
if not check_grad_flag:
|
||||
raise NotImplementedError("Some torch function is incompatible because of its complicated inputs.")
|
||||
return arg_zero, args[1:]
|
||||
|
||||
|
||||
def _get_colo_tensors_info(*args) -> list:
|
||||
info = []
|
||||
for arg in args:
|
||||
if isinstance(arg, ColoTensor):
|
||||
info.append((arg.__class__, ColoTensorSpec(arg.get_process_group(), arg.dist_spec, arg.compute_spec)))
|
||||
def _flatten_grad_args(args) -> Tuple[list, list, List[bool], TreeSpec]:
|
||||
flat_args, spec = tree_flatten(args)
|
||||
grad_args = []
|
||||
other_args = []
|
||||
grad_flags = []
|
||||
for arg in flat_args:
|
||||
flag = _is_grad_tensor(arg)
|
||||
grad_flags.append(flag)
|
||||
if flag:
|
||||
grad_args.append(arg)
|
||||
else:
|
||||
info.append(None)
|
||||
return info
|
||||
other_args.append(arg)
|
||||
assert len(grad_args) > 0
|
||||
return grad_args, other_args, grad_flags, spec
|
||||
|
||||
|
||||
def _update_colo_tensors(info, *args) -> list:
|
||||
ret = []
|
||||
for t_info, arg in zip(info, args):
|
||||
if t_info is not None:
|
||||
t_cls, spec = t_info
|
||||
arg = t_cls.from_torch_tensor(arg, spec=spec)
|
||||
ret.append(arg)
|
||||
return ret
|
||||
def _merge_args(grad_args, other_args, grad_flags, spec):
|
||||
grad_iter = iter(grad_args)
|
||||
other_iter = iter(other_args)
|
||||
flat_args = [next(grad_iter) if flag else next(other_iter) for flag in grad_flags]
|
||||
return tree_unflatten(flat_args, spec)
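The flatten/merge pair exists because autograd.Function only tracks plain tensors: the arguments are flattened with torch.utils._pytree, the grad-requiring tensors are pulled out, and the flags let the two streams be zipped back in order. A standalone sketch of the round trip with a simplified predicate:

import torch
from torch.utils._pytree import tree_flatten, tree_unflatten

def split_and_merge(args):
    flat, spec = tree_flatten(args)
    flags = [isinstance(x, torch.Tensor) and x.requires_grad for x in flat]
    grad_args = [x for x, f in zip(flat, flags) if f]
    other_args = [x for x, f in zip(flat, flags) if not f]
    # ... an autograd.Function would transform grad_args here ...
    grad_iter, other_iter = iter(grad_args), iter(other_args)
    merged = [next(grad_iter) if f else next(other_iter) for f in flags]
    return tree_unflatten(merged, spec)

args = (torch.randn(2, requires_grad=True), {"scale": 2.0, "w": torch.zeros(3)})
restored = split_and_merge(args)   # the same structure and the same tensor objects come back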
|
||||
|
|
|
@ -2,8 +2,7 @@ from .gemini import (
|
|||
ColoInitContext,
|
||||
GeminiAdamOptimizer,
|
||||
GeminiDDP,
|
||||
ZeroDDP,
|
||||
ZeroOptimizer,
|
||||
GeminiOptimizer,
|
||||
get_static_torch_model,
|
||||
post_process_colo_init_ctx,
|
||||
)
|
||||
|
@ -11,6 +10,6 @@ from .low_level import LowLevelZeroOptimizer
|
|||
from .wrapper import zero_model_wrapper, zero_optim_wrapper
|
||||
|
||||
__all__ = [
|
||||
'ZeroDDP', 'GeminiDDP', 'ZeroOptimizer', 'GeminiAdamOptimizer', 'zero_model_wrapper', 'zero_optim_wrapper',
|
||||
'GeminiDDP', 'GeminiOptimizer', 'GeminiAdamOptimizer', 'zero_model_wrapper', 'zero_optim_wrapper',
|
||||
'LowLevelZeroOptimizer', 'ColoInitContext', 'post_process_colo_init_ctx', 'get_static_torch_model'
|
||||
]
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
from .chunk import ChunkManager, TensorInfo, TensorState, search_chunk_configuration
|
||||
from .colo_init_context import ColoInitContext, post_process_colo_init_ctx
|
||||
from .gemini_ddp import GeminiDDP, ZeroDDP
|
||||
from .gemini_ddp import GeminiDDP
|
||||
from .gemini_mgr import GeminiManager
|
||||
from .gemini_optimizer import GeminiAdamOptimizer, ZeroOptimizer
|
||||
from .gemini_optimizer import GeminiAdamOptimizer, GeminiOptimizer
|
||||
from .utils import get_static_torch_model
|
||||
|
||||
__all__ = [
|
||||
'GeminiManager', 'TensorInfo', 'TensorState', 'ChunkManager', 'search_chunk_configuration', 'ZeroDDP', 'GeminiDDP',
|
||||
'get_static_torch_model', 'GeminiAdamOptimizer', 'ZeroOptimizer', 'ColoInitContext', 'post_process_colo_init_ctx'
|
||||
'GeminiManager', 'TensorInfo', 'TensorState', 'ChunkManager', 'search_chunk_configuration', 'GeminiDDP',
|
||||
'get_static_torch_model', 'GeminiAdamOptimizer', 'GeminiOptimizer', 'ColoInitContext', 'post_process_colo_init_ctx'
|
||||
]
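Since these are public exports, downstream imports need the one-line rename; GeminiAdamOptimizer keeps its name:

# from colossalai.zero import ZeroDDP, ZeroOptimizer       # old names, removed by this change
from colossalai.zero import GeminiDDP, GeminiOptimizer     # new names
from colossalai.zero import GeminiAdamOptimizer            # unchanged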
|
||||
|
|
|
@ -4,8 +4,8 @@ from typing import Dict, List, Optional
|
|||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from torch.distributed import ProcessGroup
|
||||
|
||||
from colossalai.tensor import ProcessGroup as ColoProcessGroup
|
||||
from colossalai.utils import get_current_device
|
||||
|
||||
|
||||
|
@ -55,7 +55,7 @@ class Chunk:
|
|||
|
||||
def __init__(self,
|
||||
chunk_size: int,
|
||||
process_group: ColoProcessGroup,
|
||||
process_group: ProcessGroup,
|
||||
dtype: torch.dtype,
|
||||
init_device: Optional[torch.device] = None,
|
||||
cpu_shard_init: bool = False,
|
||||
|
@ -69,7 +69,7 @@ class Chunk:
|
|||
|
||||
Args:
|
||||
chunk_size (int): the number of elements in the chunk
|
||||
process_group (ColoProcessGroup): the process group of this chunk
|
||||
process_group (ProcessGroup): the process group of this chunk
|
||||
dtype (torch.dtype): the data type of the chunk
|
||||
init_device (torch.device): optional, the device where the chunk payload is placed during construction.
Defaults to None, which means the current GPU.
|
||||
|
@ -83,7 +83,7 @@ class Chunk:
|
|||
self.chunk_size = chunk_size
|
||||
self.utilized_size = 0
|
||||
|
||||
self.torch_pg = process_group.dp_process_group()
|
||||
self.torch_pg = process_group
|
||||
self.pg_size = dist.get_world_size(self.torch_pg)
|
||||
self.pg_rank = dist.get_rank(self.torch_pg)
|
||||
|
||||
|
@ -218,7 +218,7 @@ class Chunk:
|
|||
return False
|
||||
else:
|
||||
return self.tensor_state_cnter[TensorState.HOLD] + \
|
||||
self.tensor_state_cnter[TensorState.HOLD_AFTER_BWD] == self.num_tensors
|
||||
self.tensor_state_cnter[TensorState.HOLD_AFTER_BWD] == self.num_tensors
|
||||
|
||||
@property
|
||||
def can_reduce(self):
|
||||
|
|
|
@ -2,8 +2,9 @@ from collections import deque
|
|||
from typing import Deque, Dict, Iterable, List, Optional, Set, Tuple
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from torch.distributed import ProcessGroup
|
||||
|
||||
from colossalai.tensor import ColoTensor
|
||||
from colossalai.utils import get_current_device
|
||||
|
||||
from .chunk import Chunk, ChunkFullError, TensorState
|
||||
|
@ -27,16 +28,17 @@ class ChunkManager:
|
|||
self.dp_degree_chunk_size_dict[k] = v.pop('chunk_size')
|
||||
v['init_device'] = self.device
|
||||
|
||||
self.chunk_groups: Dict[str, Deque] = dict()
|
||||
self.chunk_groups: Dict[str, Deque[Chunk]] = dict()
|
||||
self.tensor_chunk_map: Dict[torch.Tensor, Chunk] = dict()
|
||||
self.accessed_chunks: Set[Chunk] = set()
|
||||
self.accessed_mem: int = 0
|
||||
self.total_mem: Dict[str, int] = {'cpu': 0, 'cuda': 0}
|
||||
|
||||
def register_tensor(self,
|
||||
tensor: ColoTensor,
|
||||
tensor: torch.Tensor,
|
||||
group_type: str,
|
||||
config_key: int,
|
||||
process_group: ProcessGroup,
|
||||
cpu_offload: bool = False,
|
||||
pin_memory: bool = False) -> None:
|
||||
"""
|
||||
|
@ -51,7 +53,7 @@ class ChunkManager:
|
|||
pin_memory: whether the chunk is pinned in the cpu memory
|
||||
"""
|
||||
assert tensor not in self.tensor_chunk_map
|
||||
assert isinstance(tensor, ColoTensor), "Please feed ColoTensor to this ChunkManager"
|
||||
assert isinstance(tensor, torch.Tensor), "Please feed Tensor to this ChunkManager"
|
||||
assert config_key in self.dp_degree_chunk_size_dict
|
||||
|
||||
chunk_size = self.dp_degree_chunk_size_dict[config_key]
|
||||
|
@ -73,12 +75,12 @@ class ChunkManager:
|
|||
|
||||
if tensor.numel() > chunk_size:
|
||||
chunk_size = tensor.numel()
|
||||
dp_size = tensor.get_dp_world_size()
|
||||
dp_size = dist.get_world_size(process_group)
|
||||
chunk_size = chunk_size + (-chunk_size % dp_size)
|
||||
|
||||
chunk = Chunk(
|
||||
chunk_size=chunk_size,
|
||||
process_group=tensor.process_group,
|
||||
process_group=process_group,
|
||||
dtype=tensor.dtype,
|
||||
cpu_shard_init=cpu_offload,
|
||||
pin_memory=pin_memory,
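The expression chunk_size + (-chunk_size % dp_size) used above rounds the chunk size up to the next multiple of the data-parallel world size, so an oversized tensor still shards evenly across ranks. A quick check of the identity:

def round_up(chunk_size: int, dp_size: int) -> int:
    # smallest multiple of dp_size that is >= chunk_size
    return chunk_size + (-chunk_size % dp_size)

assert round_up(1000, 8) == 1000   # already a multiple: unchanged
assert round_up(1001, 8) == 1008   # padded up to the next multiple
assert round_up(5, 8) == 8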
|
||||
|
@ -220,7 +222,7 @@ class ChunkManager:
|
|||
msg.append(f'[{i}] {chunk}\n')
|
||||
return ''.join(msg)
|
||||
|
||||
def __get_chunk_group(self, group_name: str) -> Deque:
|
||||
def __get_chunk_group(self, group_name: str) -> Deque[Chunk]:
|
||||
"""Register a chunk group.
|
||||
"""
|
||||
if group_name not in self.chunk_groups:
|
||||
|
|
|
@ -4,6 +4,7 @@ from typing import Dict, List, Optional, Tuple
|
|||
import numpy as np
|
||||
import torch.distributed as dist
|
||||
import torch.nn as nn
|
||||
from torch.distributed import ProcessGroup
|
||||
|
||||
from colossalai.tensor import ColoParameter
|
||||
from colossalai.utils import is_ddp_ignored
|
||||
|
@ -59,7 +60,7 @@ def _get_unused_byte(size_list: List[int], chunk_size: int) -> int:
|
|||
return left + acc
|
||||
|
||||
|
||||
def _tensor_numel(local_param: ColoParameter, strict_ddp_flag: bool) -> int:
|
||||
def _tensor_numel(local_param: ColoParameter) -> int:
|
||||
"""_tensor_numel
|
||||
|
||||
Get the number of elements of a tensor.
|
||||
|
@ -71,15 +72,12 @@ def _tensor_numel(local_param: ColoParameter, strict_ddp_flag: bool) -> int:
|
|||
Returns:
|
||||
int: the number of elements.
|
||||
"""
|
||||
if strict_ddp_flag and type(local_param) is ColoParameter:
|
||||
return local_param.numel_global()
|
||||
else:
|
||||
# if local_param is not ColoParameter, we assume it's replicated
|
||||
return local_param.numel()
|
||||
# TODO(ver217): support dtensor here
|
||||
return local_param.numel()
|
||||
|
||||
|
||||
def classify_params_by_dp_degree(param_order: OrderedParamGenerator,
|
||||
strict_ddp_flag: bool = False) -> Dict[int, List[ColoParameter]]:
|
||||
process_group: ProcessGroup) -> Dict[int, List[ColoParameter]]:
|
||||
"""classify_params_by_dp_degree
|
||||
|
||||
Classify the parameters by their dp degree
|
||||
|
@ -97,13 +95,7 @@ def classify_params_by_dp_degree(param_order: OrderedParamGenerator,
|
|||
# assert isinstance(param, ColoParameter), "please init model in the ColoInitContext"
|
||||
if is_ddp_ignored(param):
|
||||
continue
|
||||
|
||||
if strict_ddp_flag or type(param) is not ColoParameter:
|
||||
# if model is not initialized with ColoInitContext, we assume it's replicated
|
||||
# TODO(ver217): integrate DTensor
|
||||
param_key = dist.get_world_size()
|
||||
else:
|
||||
param_key = param.process_group.dp_world_size()
|
||||
param_key = dist.get_world_size(process_group)
|
||||
|
||||
if param_key not in params_dict:
|
||||
params_dict[param_key] = []
|
||||
|
@ -119,6 +111,7 @@ def search_chunk_configuration(
|
|||
min_chunk_size_m: float = 32,
|
||||
filter_exlarge_params: bool = True,
|
||||
strict_ddp_flag: bool = False,
|
||||
process_group: Optional[ProcessGroup] = None,
|
||||
memstas: Optional[MemStats] = None) -> Tuple[Dict, int, int]:
|
||||
"""search_chunk_configuration
|
||||
|
||||
|
@ -149,7 +142,7 @@ def search_chunk_configuration(
|
|||
min_chunk_size = round(min_chunk_size_m * 1024**2)
|
||||
assert search_range >= 0
|
||||
|
||||
params_dict = classify_params_by_dp_degree(param_order, strict_ddp_flag)
|
||||
params_dict = classify_params_by_dp_degree(param_order, process_group)
|
||||
size_lcm = np.lcm.reduce(list(params_dict.keys()))
|
||||
config_dict: Dict[int, Dict] = dict()
|
||||
total_param_size = 0
|
||||
|
@ -157,7 +150,7 @@ def search_chunk_configuration(
|
|||
size_dict: Dict[int, List[int]] = dict()
|
||||
for dp_degree in params_dict:
|
||||
params_list = params_dict[dp_degree]
|
||||
size_list = [_tensor_numel(p, strict_ddp_flag) for p in params_list]
|
||||
size_list = [_tensor_numel(p) for p in params_list]
|
||||
group_acc_size = sum(size_list)
|
||||
total_param_size += group_acc_size
|
||||
|
||||
|
|
|
@ -2,19 +2,21 @@ import itertools
|
|||
from collections import OrderedDict
|
||||
from contextlib import nullcontext
|
||||
from functools import partial
|
||||
from typing import Dict, Iterator, List, Optional, Set, Tuple, Union
|
||||
from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import torch.nn as nn
|
||||
from torch.distributed import ProcessGroup
|
||||
from torch.distributed.distributed_c10d import _get_default_group
|
||||
|
||||
from colossalai.checkpoint_io.utils import calculate_tensor_size, StateDictSharder
|
||||
from colossalai.interface import ModelWrapper
|
||||
|
||||
from colossalai.checkpoint_io.utils import StateDictSharder
|
||||
from colossalai.lazy import LazyTensor
|
||||
from colossalai.logging import get_dist_logger
|
||||
from colossalai.nn.parallel.data_parallel import ColoDDP, _cast_float, free_storage
|
||||
from colossalai.tensor import ProcessGroup as ColoProcessGroup
|
||||
from colossalai.tensor import ReplicaSpec
|
||||
from colossalai.tensor.colo_parameter import ColoParameter, ColoTensor, ColoTensorSpec
|
||||
from colossalai.nn.parallel.data_parallel import _cast_float, free_storage
|
||||
from colossalai.tensor.colo_parameter import ColoParameter
|
||||
from colossalai.tensor.param_op_hook import ColoParamOpHookManager
|
||||
from colossalai.utils import get_current_device, is_ddp_ignored
|
||||
|
||||
|
@ -30,14 +32,13 @@ except ImportError:
|
|||
_EXTRA_STATE_KEY_SUFFIX = '_extra_state'
|
||||
|
||||
__all__ = [
|
||||
'ZeroDDP',
|
||||
'GeminiDDP',
|
||||
]
|
||||
|
||||
|
||||
class ZeroDDP(ColoDDP):
|
||||
"""ZeRO DDP for ColoTensor.
|
||||
Warning: Nested ZeroDDP is not supported now.
|
||||
class GeminiDDP(ModelWrapper):
|
||||
"""ZeRO DDP.
|
||||
Warning: Nested GeminiDDP is not supported now.
|
||||
It is designed to be used with ChunkManager and GeminiManager.
|
||||
For more details, see the API reference of ``ChunkManager`` and ``GeminiManager``.
|
||||
|
||||
|
@ -54,20 +55,54 @@ class ZeroDDP(ColoDDP):
|
|||
mixed_precision (torch.dtype): If set to torch.float16, the model will be trained in fp16. Otherwise, the model will be trained in bf16. Defaults to torch.float16.
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
module: torch.nn.Module,
|
||||
gemini_manager: GeminiManager,
|
||||
pin_memory: bool = False,
|
||||
force_outputs_fp32: bool = False,
|
||||
strict_ddp_mode: bool = False,
|
||||
scatter_after_inference: bool = True,
|
||||
mixed_precision: torch.dtype = torch.float16) -> None:
|
||||
def __init__(
|
||||
self,
|
||||
module: torch.nn.Module,
|
||||
chunk_config_dict: Optional[dict] = None,
|
||||
chunk_init_device: torch.device = torch.device('cpu'),
|
||||
placement_policy: str = "static",
|
||||
shard_param_frac: float = 1.0, # only for static placement
|
||||
offload_optim_frac: float = 0.0, # only for static placement
|
||||
offload_param_frac: float = 0.0, # only for static placement
|
||||
warmup_non_model_data_ratio: float = 0.8, # only for auto placement
|
||||
steady_cuda_cap_ratio: float = 0.9, # only for auto placement
|
||||
search_range_m: int = 32, # chunk search options
|
||||
hidden_dim: Optional[int] = None, # chunk search options
|
||||
min_chunk_size_m: float = 32, # chunk search options
|
||||
pin_memory: bool = False,
|
||||
force_outputs_fp32: bool = False,
|
||||
strict_ddp_mode: bool = False,
|
||||
scatter_after_inference: bool = True,
|
||||
mixed_precision: torch.dtype = torch.float16,
|
||||
process_group: Optional[ProcessGroup] = None,
|
||||
memstats: Optional[MemStats] = None, # genimi memory stats
|
||||
verbose: bool = False) -> None:
|
||||
assert mixed_precision in (torch.float16, torch.bfloat16)
|
||||
self.gemini_manager = gemini_manager
|
||||
self.chunk_manager: ChunkManager = gemini_manager.chunk_manager
|
||||
if chunk_config_dict is not None:
|
||||
self.chunk_manager = ChunkManager(chunk_config_dict, chunk_init_device)
|
||||
else:
|
||||
# some ugly hotfix for the compatibility with Lightning
|
||||
if search_range_m is None:
|
||||
search_range_m = 32
|
||||
self.chunk_manager = init_chunk_manager(model=module,
|
||||
init_device=chunk_init_device,
|
||||
hidden_dim=hidden_dim,
|
||||
search_range_m=search_range_m,
|
||||
min_chunk_size_m=min_chunk_size_m,
|
||||
strict_ddp_flag=strict_ddp_mode,
|
||||
process_group=process_group,
|
||||
verbose=verbose)
|
||||
self.gemini_manager = GeminiManager(placement_policy,
|
||||
self.chunk_manager,
|
||||
memstats,
|
||||
shard_param_frac=shard_param_frac,
|
||||
offload_optim_frac=offload_optim_frac,
|
||||
offload_param_frac=offload_param_frac,
|
||||
warmup_non_model_data_ratio=warmup_non_model_data_ratio,
|
||||
steady_cuda_cap_ratio=steady_cuda_cap_ratio)
|
||||
self.force_outputs_fp32 = force_outputs_fp32
|
||||
self.param_op_hook = GeminiZeROHook(gemini_manager)
|
||||
self.fp32_params: List[ColoTensor] = list()
|
||||
self.param_op_hook = GeminiZeROHook(self.gemini_manager)
|
||||
self.fp32_params: List[torch.Tensor] = list()
|
||||
self.fp16_params: List[ColoParameter] = list()
|
||||
self.overflow_counter = 0
|
||||
self.grads_device: Dict[torch.Tensor, torch.device] = dict()
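With this refactor GeminiDDP builds its own ChunkManager and GeminiManager, so callers pass the chunk and placement arguments directly instead of a pre-built gemini_manager. A hedged construction sketch: the keyword names mirror the signature above, the values are illustrative, and it assumes torch.distributed is already initialized (e.g. via colossalai.launch_from_torch):

import torch
import torch.nn as nn
from colossalai.zero import GeminiDDP

model = nn.Sequential(nn.Linear(64, 64), nn.GELU(), nn.Linear(64, 8))
gemini_model = GeminiDDP(
    model,
    chunk_init_device=torch.device('cuda'),
    placement_policy='static',
    shard_param_frac=1.0,      # shard every parameter across the data-parallel group
    offload_optim_frac=0.0,
    pin_memory=True,
    mixed_precision=torch.float16,
)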
|
||||
|
@ -75,6 +110,7 @@ class ZeroDDP(ColoDDP):
|
|||
self.name2param: Dict[str, nn.Parameter] = dict()
|
||||
self.scatter_after_inference = scatter_after_inference
|
||||
self.mixed_precision = mixed_precision
|
||||
self.dp_process_group = process_group or _get_default_group()
|
||||
|
||||
self._logger = get_dist_logger()
|
||||
|
||||
|
@ -88,20 +124,67 @@ class ZeroDDP(ColoDDP):
|
|||
for p in module.parameters():
|
||||
param_order.append(p)
|
||||
|
||||
self._init_chunks(param_order=param_order,
|
||||
strict_ddp_mode=strict_ddp_mode,
|
||||
cpu_offload=self.gemini_manager.policy_name != 'cuda',
|
||||
pin_memory=pin_memory)
|
||||
|
||||
for name, param in module.named_parameters():
|
||||
self.param2name[param] = name
|
||||
for m_name, m_var in module.named_modules():
|
||||
for p_name, p_var in m_var.named_parameters(recurse=False):
|
||||
param_name = m_name + '.' + p_name if m_name else p_name
|
||||
self.name2param[param_name] = p_var
|
||||
super().__init__(module, process_group=ColoProcessGroup())
|
||||
|
||||
self._init_chunks(param_order=param_order,
|
||||
strict_ddp_mode=strict_ddp_mode,
|
||||
cpu_offload=self.gemini_manager.policy_name != 'cuda',
|
||||
pin_memory=pin_memory)
|
||||
super().__init__(module)
|
||||
self._non_persistent_buffers_set = self._get_non_persistent_buffers_set(module)
|
||||
self._cast_buffers()
|
||||
# register grad hook
|
||||
for p in module.parameters():
|
||||
if is_ddp_ignored(p):
|
||||
continue
|
||||
if p.requires_grad:
|
||||
p.register_hook(partial(self.grad_handle, p))
|
||||
|
||||
def parameters(self, recurse: bool = True):
|
||||
return self.module.parameters(recurse)
|
||||
|
||||
def named_parameters(self, prefix: str = '', recurse: bool = True):
|
||||
return self.module.named_parameters(prefix, recurse)
|
||||
|
||||
def named_buffers(self, prefix: str = '', recurse: bool = True):
|
||||
return self.module.named_buffers(prefix, recurse)
|
||||
|
||||
def named_children(self):
|
||||
return self.module.named_children()
|
||||
|
||||
def named_modules(self,
|
||||
memo: Optional[Set[torch.nn.Module]] = None,
|
||||
prefix: str = '',
|
||||
remove_duplicate: bool = True):
|
||||
return self.module.named_modules(memo, prefix, remove_duplicate)
|
||||
|
||||
@staticmethod
|
||||
def set_params_to_ignore(params_to_ignore: Iterable[torch.Tensor]) -> None:
|
||||
"""Sets parameters to be ignored by DDP.
|
||||
This method must be called before initializing ColoDDP.
|
||||
|
||||
Example:
|
||||
>>> params_to_ignore = []
|
||||
>>> for p in module.parameters():
|
||||
>>> if should_ignore(p):
|
||||
>>> params_to_ignore.append(p)
|
||||
>>> ColoDDP.set_params_to_ignore(params_to_ignore)
|
||||
>>> module = ColoDDP(module)
|
||||
|
||||
Args:
|
||||
params_to_ignore (Iterable[torch.Tensor]): A list of parameters to be ignored.
|
||||
"""
|
||||
for p in params_to_ignore:
|
||||
p._ddp_to_ignore = True
|
||||
|
||||
def unwrap(self):
|
||||
# as save/load state dict is overridden, only return self
|
||||
return self
|
||||
|
||||
def _get_non_persistent_buffers_set(self,
|
||||
module,
|
||||
|
@ -207,7 +290,7 @@ class ZeroDDP(ColoDDP):
|
|||
error_params.append(self.param2name[param])
|
||||
error_str = "\n\t".join(error_params)
|
||||
raise RuntimeError("ZERO DDP error: the synchronization of gradients doesn't exit properly.",
|
||||
"The most possible reason is that the model is not compatible with ZeroDDP.\n",
|
||||
"The most possible reason is that the model is not compatible with GeminiDDP.\n",
|
||||
f"{error_str}")
|
||||
self._setup_grads_ptr()
|
||||
self._logger.debug(
|
||||
|
@ -227,6 +310,7 @@ class ZeroDDP(ColoDDP):
|
|||
self._post_backward()
|
||||
|
||||
def grad_handle(self, p, grad):
|
||||
setattr(p, "_gemini_reduced", True)
|
||||
empty_grad = torch.empty_like(grad)
|
||||
free_storage(empty_grad)
|
||||
with torch._C.DisableTorchFunction():
|
||||
|
@ -533,7 +617,7 @@ class ZeroDDP(ColoDDP):
|
|||
for chunk_32 in chunk_list:
|
||||
chunk_16 = chunk_32.paired_chunk
|
||||
assert chunk_16 is not None
|
||||
chunk_16.optim_update()
|
||||
chunk_16.payload.copy_(chunk_32.payload)
|
||||
|
||||
for name, buf in persistent_buffers.items():
|
||||
if buf is not None:
|
||||
|
@ -557,17 +641,11 @@ class ZeroDDP(ColoDDP):
|
|||
unexpected_keys.append(key)
|
||||
|
||||
def _init_chunks(self, param_order, strict_ddp_mode: bool, cpu_offload: bool, pin_memory: bool):
|
||||
ddp_pg = ColoProcessGroup()
|
||||
dp_world_size = dist.get_world_size(self.dp_process_group)
|
||||
for p in param_order.generate():
|
||||
self._preprocess_param(p)
|
||||
assert type(p) is ColoParameter
|
||||
|
||||
# gather sharded parameters in the strict ddp mode
|
||||
if strict_ddp_mode:
|
||||
if not p.is_replicate():
|
||||
p.set_dist_spec(ReplicaSpec())
|
||||
p.set_process_group(pg=ddp_pg)
|
||||
|
||||
# ignore the parameters with no gradient
|
||||
if not p.requires_grad:
|
||||
self.set_params_to_ignore([p])
|
||||
|
@ -578,38 +656,37 @@ class ZeroDDP(ColoDDP):
|
|||
continue
|
||||
|
||||
# create a fp32 parameter
|
||||
fp32_data = p.data.float()
|
||||
fp32_p = ColoTensor(fp32_data, spec=ColoTensorSpec(p.process_group))
|
||||
fp32_p = p.data.float()
|
||||
# create a fp16 parameter
|
||||
p.data = p.data.to(self.mixed_precision)
|
||||
|
||||
# register the fp16 parameter and fp32 parameter in the chunk manager
|
||||
dp_world_size = p.process_group.dp_world_size()
|
||||
self.chunk_manager.register_tensor(tensor=p,
|
||||
group_type='fp16_param',
|
||||
config_key=dp_world_size,
|
||||
process_group=self.dp_process_group,
|
||||
cpu_offload=cpu_offload,
|
||||
pin_memory=pin_memory)
|
||||
self.chunk_manager.register_tensor(tensor=fp32_p,
|
||||
group_type='fp32_param',
|
||||
config_key=dp_world_size,
|
||||
process_group=self.dp_process_group,
|
||||
cpu_offload=cpu_offload,
|
||||
pin_memory=pin_memory)
|
||||
|
||||
self.fp16_params.append(p)
|
||||
self.fp32_params.append(fp32_p)
|
||||
self.grads_device[p] = self.gemini_manager.default_device
|
||||
|
||||
self.chunk_manager.close_all_groups()
|
||||
|
||||
self.gemini_manager.setup_grads_device(self.fp16_params, self.grads_device)
|
||||
# move master weights to corresponding device and setup paired chunks
|
||||
for p, fp32_p in zip(self.fp16_params, self.fp32_params):
|
||||
chunk_16 = self.chunk_manager.get_chunk(p)
|
||||
chunk_32 = self.chunk_manager.get_chunk(fp32_p)
|
||||
chunk_32.init_pair(chunk_16)
|
||||
|
||||
# keep gathered chunks in CUDA
|
||||
if chunk_16.keep_gathered:
|
||||
self.grads_device[p] = get_current_device()
|
||||
if chunk_32.device_type != self.grads_device[p].type:
|
||||
self.chunk_manager.move_chunk(chunk_32, self.grads_device[p])
|
||||
|
||||
def _cast_buffers(self):
|
||||
for buffer in self.module.buffers():
|
||||
|
@ -705,65 +782,3 @@ class ZeroDDP(ColoDDP):
|
|||
yield sharder.current_block, sharder.current_block_size
|
||||
|
||||
|
||||
class GeminiDDP(ZeroDDP):
|
||||
|
||||
def __init__(self,
|
||||
module: torch.nn.Module,
|
||||
device: torch.device,
|
||||
placement_policy: str = "cpu",
|
||||
pin_memory: bool = False,
|
||||
force_outputs_fp32: bool = False,
|
||||
strict_ddp_mode: bool = False,
|
||||
scatter_after_inference: bool = True,
|
||||
search_range_m: int = 32,
|
||||
hidden_dim: Optional[int] = None,
|
||||
min_chunk_size_m: float = 32,
|
||||
memstats: Optional[MemStats] = None,
|
||||
mixed_precision: torch.dtype = torch.float16,
|
||||
verbose: bool = False) -> None:
|
||||
"""
|
||||
A torch.Module wrapper using ZeRO-DP and Gemini.
|
||||
ZeRO is for parallelism. Gemini is for memory management.
|
||||
WARNING: The class will modify the module in place!
|
||||
|
||||
Example:
|
||||
model is initialized under the context of ColoInitContext
|
||||
>>> model = GeminiDDP(model, torch.cuda.current_device(), "cuda")
|
||||
>>> logits = model(x)
|
||||
>>> loss = criterion(logits, labels)
|
||||
>>> model.backward(loss)
|
||||
|
||||
Args:
|
||||
module (torch.nn.Module): the model to be wrapped.
|
||||
device (torch.device): device to place the model.
|
||||
placement_policy (str, optional): "cpu", "cuda", "auto". Defaults to "cpu".
|
||||
pin_memory (bool, optional): use pin memory on CPU. Defaults to False.
|
||||
force_outputs_fp32 (bool, optional): force outputs are fp32. Defaults to False.
|
||||
search_range_m (int, optional): chunk size searching range divided by 2^20. Defaults to 32.
|
||||
hidden_dim (int, optional): the hidden dimension of DNN.
|
||||
Users can provide this argument to speed up searching.
|
||||
If users do not know this argument before training, it is ok. We will use a default value 1024.
|
||||
min_chunk_size_m (float, optional): the minimum chunk size divided by 2^20.
|
||||
If the aggregate size of parameters is still smaller than the minimum chunk size,
|
||||
all parameters will be compacted into one small chunk.
|
||||
memstats (MemStats, optional): the memory statistics collected by a runtime memory tracer.
|
||||
"""
|
||||
# some ugly hotfix for the compatibility with Lightning
|
||||
if search_range_m is None:
|
||||
search_range_m = 32
|
||||
|
||||
chunk_manager = init_chunk_manager(model=module,
|
||||
init_device=device,
|
||||
hidden_dim=hidden_dim,
|
||||
search_range_m=search_range_m,
|
||||
min_chunk_size_m=min_chunk_size_m,
|
||||
strict_ddp_flag=strict_ddp_mode,
|
||||
verbose=verbose)
|
||||
gemini_manager = GeminiManager(placement_policy, chunk_manager, memstats)
|
||||
super().__init__(module,
|
||||
gemini_manager,
|
||||
pin_memory,
|
||||
force_outputs_fp32,
|
||||
strict_ddp_mode,
|
||||
scatter_after_inference,
|
||||
mixed_precision=mixed_precision)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
import functools
|
||||
from time import time
|
||||
from typing import List, Optional, Tuple
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
import torch
|
||||
|
||||
|
@ -26,7 +26,11 @@ class GeminiManager:
|
|||
memstats (MemStats, optional): memory statistics collected by a runtime memory tracer. If None, GeminiManager will collect them during a warmup iteration.
|
||||
"""
|
||||
|
||||
def __init__(self, placement_policy: str, chunk_manager: ChunkManager, memstats: Optional[MemStats] = None) -> None:
|
||||
def __init__(self,
|
||||
placement_policy: str,
|
||||
chunk_manager: ChunkManager,
|
||||
memstats: Optional[MemStats] = None,
|
||||
**placement_kwargs) -> None:
|
||||
|
||||
assert placement_policy in PlacementPolicyFactory.get_policy_names()
|
||||
self.policy_name = placement_policy
|
||||
|
@ -37,7 +41,7 @@ class GeminiManager:
|
|||
self._memstats = memstats
|
||||
self._mem_stats_collector = ChunkMemStatsCollector(chunk_manager,
|
||||
self._memstats) if policy_cls.need_mem_stats else None
|
||||
self._placement_policy = policy_cls(chunk_manager, self._mem_stats_collector)
|
||||
self._placement_policy = policy_cls(chunk_manager, self._mem_stats_collector, **placement_kwargs)
|
||||
self._compute_list: List[Tuple[Chunk, ...]] = []
|
||||
self._compute_idx: int = -1
|
||||
|
||||
|
@ -133,10 +137,6 @@ class GeminiManager:
|
|||
if self._warmup and self._placement_policy.need_mem_stats:
|
||||
self._compute_list.append(chunks)
|
||||
|
||||
@property
|
||||
def default_device(self):
|
||||
return self._placement_policy.get_default_device()
|
||||
|
||||
def sample_overall_data(self):
|
||||
if self._mem_stats_collector:
|
||||
self._mem_stats_collector.sample_overall_data()
|
||||
|
@ -159,6 +159,6 @@ class GeminiManager:
|
|||
def is_cuda_margin_mem_avail(self) -> bool:
|
||||
return self._placement_policy.need_mem_stats
|
||||
|
||||
@staticmethod
|
||||
def get_default_device(policy_name: str) -> torch.device:
|
||||
return PlacementPolicyFactory.get_default_device(policy_name)
|
||||
def setup_grads_device(self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor,
|
||||
torch.device]) -> None:
|
||||
self._placement_policy.setup_grads_device(params, grads_device_map)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
import copy
|
||||
import math
|
||||
import warnings
|
||||
from typing import Any, Dict, Iterator, OrderedDict, Set, Tuple
|
||||
from typing import Any, Dict, Iterator, OrderedDict, Set, Tuple, Union
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
|
@ -10,16 +10,17 @@ from torch.nn import Parameter
|
|||
from torch.optim import Optimizer
|
||||
|
||||
from colossalai.amp.naive_amp.mixed_precision_mixin import BF16MixedPrecisionMixin, FP16MixedPrecisionMixin
|
||||
from colossalai.checkpoint_io.utils import StateDictSharder
|
||||
from colossalai.checkpoint_io.utils import calculate_tensor_size, StateDictSharder
|
||||
from colossalai.interface import OptimizerWrapper
|
||||
from colossalai.logging import get_dist_logger
|
||||
from colossalai.nn.optimizer import ColossalaiOptimizer, CPUAdam, FusedAdam, HybridAdam
|
||||
from colossalai.nn.optimizer import CPUAdam, FusedAdam, HybridAdam
|
||||
from colossalai.tensor.d_tensor import is_distributed_tensor
|
||||
from colossalai.utils import disposable, get_current_device, is_ddp_ignored
|
||||
|
||||
from .chunk import Chunk, ChunkManager
|
||||
from .gemini_ddp import ZeroDDP
|
||||
from .gemini_ddp import GeminiDDP
|
||||
|
||||
__all__ = ['ZeroOptimizer', 'GeminiAdamOptimizer']
|
||||
__all__ = ['GeminiOptimizer', 'GeminiAdamOptimizer']
|
||||
|
||||
_AVAIL_OPTIM_LIST = {FusedAdam, CPUAdam, HybridAdam}
|
||||
|
||||
|
@ -27,7 +28,7 @@ _AVAIL_OPTIM_LIST = {FusedAdam, CPUAdam, HybridAdam}
|
|||
class GeminiFP16MixedPrecisionMixin(FP16MixedPrecisionMixin):
|
||||
|
||||
def __init__(self,
|
||||
module: ZeroDDP,
|
||||
module: GeminiDDP,
|
||||
initial_scale: float = 2**16,
|
||||
min_scale: float = 1,
|
||||
growth_factor: float = 2,
|
||||
|
@ -46,11 +47,11 @@ class GeminiFP16MixedPrecisionMixin(FP16MixedPrecisionMixin):
|
|||
self.module.overflow_counter = 0
|
||||
|
||||
|
||||
class ZeroOptimizer(ColossalaiOptimizer):
|
||||
"""A wrapper for optimizer. ``ZeroDDP`` and ``ZeroOptimizer`` implement Zero Redundancy Optimizer (ZeRO state-3).
|
||||
class GeminiOptimizer(OptimizerWrapper):
|
||||
"""A wrapper for optimizer. ``GeminiDDP`` and ``GeminiOptimizer`` implement Zero Redundancy Optimizer (ZeRO state-3).
|
||||
|
||||
Note:
|
||||
You must use ``ZeroDDP`` with ``ZeroOptimizer``.
|
||||
You must use ``GeminiDDP`` with ``GeminiOptimizer``.
|
||||
|
||||
Note:
|
||||
Make sure you set ``placement_policy`` of ``GeminiManager`` to `"auto"`,
|
||||
|
@ -58,7 +59,7 @@ class ZeroOptimizer(ColossalaiOptimizer):
|
|||
|
||||
Args:
|
||||
optim (Optimizer): An Optimizer instance.
|
||||
module (ZeroDDP): A ``ZeroDDP`` instance.
|
||||
module (GeminiDDP): A ``GeminiDDP`` instance.
|
||||
gpu_margin_mem_ratio (float, optional): The ratio of GPU remaining memory (after the first forward-backward)
|
||||
which will be used when using hybrid CPU optimizer.
|
||||
This argument is meaningless when `placement_policy` of `GeminiManager` is not "auto".
|
||||
|
@ -70,15 +71,15 @@ class ZeroOptimizer(ColossalaiOptimizer):
|
|||
growth_interval (float, optional): Growth_interval used by DynamicGradScaler. Defaults to 1000.
|
||||
hysteresis (float, optional): Hysteresis used by DynamicGradScaler. Defaults to 2.
|
||||
max_scale (int, optional): Max_scale used by DynamicGradScaler. Defaults to 2**32.
|
||||
clipping_norm (float, optional): The norm value used to clip gradient. Defaults to 0.0.
|
||||
max_norm (float, optional): The norm value used to clip gradient. Defaults to 0.0.
|
||||
norm_type (float, optional): The type of norm used for gradient clipping. Currently, only L2-norm (norm_type=2.0)
|
||||
is supported in ZeroOptimizer. Defaults to 2.0.
|
||||
is supported in GeminiOptimizer. Defaults to 2.0.
|
||||
verbose (bool, optional): Whether to print verbose information, including grad overflow info. Defaults to False.
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
optim: Optimizer,
|
||||
module: ZeroDDP,
|
||||
module: GeminiDDP,
|
||||
gpu_margin_mem_ratio: float = 0.0,
|
||||
initial_scale: float = 2**32,
|
||||
min_scale: float = 1,
|
||||
|
@ -87,12 +88,12 @@ class ZeroOptimizer(ColossalaiOptimizer):
|
|||
growth_interval: int = 1000,
|
||||
hysteresis: int = 2,
|
||||
max_scale: float = 2**32,
|
||||
clipping_norm: float = 0.0,
|
||||
max_norm: float = 0.0,
|
||||
norm_type: float = 2.0,
|
||||
verbose: bool = False,
|
||||
**defaults: Any):
|
||||
super().__init__(optim)
|
||||
assert isinstance(module, ZeroDDP)
|
||||
assert isinstance(module, GeminiDDP)
|
||||
assert type(optim) in _AVAIL_OPTIM_LIST, "You should use an optimizer in the available list:\n" \
|
||||
f"{_AVAIL_OPTIM_LIST}"
|
||||
self.module = module
|
||||
|
@ -101,8 +102,8 @@ class ZeroOptimizer(ColossalaiOptimizer):
|
|||
self.param_to_range: Dict[Parameter, Tuple[int, int]] = dict()
|
||||
self.param_to_chunk32: Dict[Parameter, Chunk] = dict()
|
||||
self.chunk16_set: Set[Chunk] = set()
|
||||
self.clipping_flag = clipping_norm > 0.0
|
||||
self.max_norm = clipping_norm
|
||||
self.clipping_flag = max_norm > 0.0
|
||||
self.max_norm = max_norm
|
||||
self.verbose = verbose
|
||||
self.param_groups_backup = list()
|
||||
|
||||
|
@ -111,7 +112,7 @@ class ZeroOptimizer(ColossalaiOptimizer):
|
|||
self.id_to_fake_params: Dict[int, Parameter] = dict()
|
||||
|
||||
if self.clipping_flag:
|
||||
assert norm_type == 2.0, "ZeroOptimizer only supports L2 norm now"
|
||||
assert norm_type == 2.0, "GeminiOptimizer only supports L2 norm now"
|
||||
|
||||
ddp_param_list = []
|
||||
for name, param in module.named_parameters():
|
||||
|
@ -703,8 +704,19 @@ class ZeroOptimizer(ColossalaiOptimizer):
|
|||
|
||||
yield sharder.current_block, sharder.current_block_size
|
||||
|
||||
def clip_grad_by_value(self, clip_value: float, *args, **kwargs) -> None:
|
||||
raise NotImplementedError('Gemini does not support clip_grad_by_value')
|
||||
|
||||
class GeminiAdamOptimizer(ZeroOptimizer):
|
||||
def clip_grad_by_norm(self,
|
||||
max_norm: Union[float, int],
|
||||
norm_type: Union[float, int] = 2,
|
||||
error_if_nonfinite: bool = False,
|
||||
*args,
|
||||
**kwargs) -> torch.Tensor:
|
||||
warnings.warn('Gemini controls grad clipping by itself, so you should not use clip_grad_by_norm')
|
||||
|
||||
|
||||
class GeminiAdamOptimizer(GeminiOptimizer):
|
||||
|
||||
def __init__(self, model: torch.nn.Module, **defaults: Any) -> None:
|
||||
optimizer = HybridAdam(model.parameters(), **defaults)
|
||||
|
|
|
@ -9,7 +9,7 @@ class MemStats(object):
|
|||
|
||||
def __init__(self) -> None:
|
||||
"""
|
||||
Store the non model data statistics used for Gemini and ZeroOptimizer.
|
||||
Store the non model data statistics used for Gemini and GeminiOptimizer.
|
||||
"""
|
||||
# (preop_step, List[param])
|
||||
self._step_param_dict = dict()
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import functools
|
||||
import warnings
|
||||
from abc import ABC, abstractmethod
|
||||
from time import time
|
||||
from typing import Dict, List, Optional, Tuple, Type
|
||||
|
@ -7,6 +8,7 @@ import torch
|
|||
|
||||
from colossalai.utils import get_current_device
|
||||
from colossalai.utils.memory import colo_device_memory_capacity
|
||||
from colossalai.zero.gemini.chunk import Chunk
|
||||
|
||||
from .chunk import Chunk, ChunkManager
|
||||
from .memory_tracer import ChunkMemStatsCollector
|
||||
|
@ -17,7 +19,8 @@ class PlacementPolicy(ABC):
|
|||
|
||||
def __init__(self,
|
||||
chunk_manager: ChunkManager,
|
||||
mem_stats_collector: Optional[ChunkMemStatsCollector] = None) -> None:
|
||||
mem_stats_collector: Optional[ChunkMemStatsCollector] = None,
|
||||
**kwargs) -> None:
|
||||
self.chunk_manager = chunk_manager
|
||||
self.mem_stats_collector: Optional[ChunkMemStatsCollector] = mem_stats_collector
|
||||
|
||||
|
@ -25,57 +28,87 @@ class PlacementPolicy(ABC):
|
|||
def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:
|
||||
raise NotImplementedError
|
||||
|
||||
@staticmethod
|
||||
def get_default_device() -> torch.device:
|
||||
return torch.device('cpu')
|
||||
@abstractmethod
|
||||
def setup_grads_device(self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor,
|
||||
torch.device]) -> None:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class CPUPlacementPolicy(PlacementPolicy):
|
||||
class StaticPlacementPolicy(PlacementPolicy):
|
||||
|
||||
def __init__(self,
|
||||
chunk_manager: ChunkManager,
|
||||
mem_stats_collector: Optional[ChunkMemStatsCollector] = None) -> None:
|
||||
mem_stats_collector: Optional[ChunkMemStatsCollector] = None,
|
||||
shard_param_frac: float = 1.0,
|
||||
offload_optim_frac: float = 0.0,
|
||||
offload_param_frac: float = 0.0,
|
||||
**kwargs) -> None:
|
||||
super().__init__(chunk_manager, mem_stats_collector=mem_stats_collector)
|
||||
if offload_param_frac > 0.0 and (shard_param_frac != 1.0 or offload_optim_frac != 1.0):
|
||||
warnings.warn('offload_param_frac is ignored when shard_param_frac != 1.0 or offload_optim_frac != 1.0')
|
||||
offload_param_frac = 0.0
|
||||
self.shard_param_frac = shard_param_frac
|
||||
self.offload_optim_frac = offload_optim_frac
|
||||
self.offload_param_frac = offload_param_frac
|
||||
# these should be initialized in setup_grads_device
|
||||
self.keep_gathered_chunk_mem = 0.0
|
||||
self.keep_cuda_chunk_mem = 0.0
|
||||
|
||||
def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:
|
||||
volume = 0
|
||||
start = time()
|
||||
can_shard_chunk_mem = sum(chunk.chunk_mem for chunk in can_evict_chunks)
|
||||
can_offload_chunk_mem = can_shard_chunk_mem
|
||||
for chunk in can_evict_chunks:
|
||||
if can_shard_chunk_mem <= self.keep_gathered_chunk_mem:
|
||||
break
|
||||
self.chunk_manager.release_chunk(chunk)
|
||||
# real saved mem is chunk_mem - shard_mem, for simplicity we use chunk_mem
|
||||
can_shard_chunk_mem -= chunk.chunk_mem
|
||||
for chunk in can_evict_chunks:
|
||||
if can_offload_chunk_mem <= self.keep_cuda_chunk_mem:
|
||||
break
|
||||
self.chunk_manager.move_chunk(chunk, torch.device('cpu'))
|
||||
volume += chunk.chunk_mem
|
||||
return volume, time() - start
|
||||
# real saved mem is shard_mem, for simplicity we use chunk_mem
|
||||
can_offload_chunk_mem -= chunk.chunk_mem
|
||||
return 0, 0.0
|
||||
|
||||
def setup_grads_device(self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor,
|
||||
torch.device]) -> None:
|
||||
total_chunk_mem = sum(self.chunk_manager.get_chunk(p).chunk_mem for p in params)
|
||||
|
||||
class CUDAPlacementPolicy(PlacementPolicy):
|
||||
|
||||
def __init__(self,
|
||||
chunk_manager: ChunkManager,
|
||||
mem_stats_collector: Optional[ChunkMemStatsCollector] = None) -> None:
|
||||
assert torch.cuda.is_available(), 'Cannot use CUDATensorPlacementPolicy when CUDA is not available'
|
||||
super().__init__(chunk_manager, mem_stats_collector=mem_stats_collector)
|
||||
|
||||
def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:
|
||||
return 0, 0
|
||||
|
||||
@staticmethod
|
||||
def get_default_device() -> torch.device:
|
||||
return get_current_device()
|
||||
offload_optim_chunk_mem = total_chunk_mem * self.offload_optim_frac
|
||||
offloaded_optim_chunk_mem = 0
|
||||
chunks = set(self.chunk_manager.get_chunk(p) for p in params)
|
||||
for chunk in chunks:
|
||||
params = chunk.get_tensors()
|
||||
# init offload optim settings
|
||||
# keep gathered chunks in CUDA
|
||||
if chunk.keep_gathered or offloaded_optim_chunk_mem >= offload_optim_chunk_mem:
|
||||
device = get_current_device()
|
||||
else:
|
||||
device = torch.device('cpu')
|
||||
# real offloaded mem is chunk.shard_mem, for simplicity we use chunk mem here
|
||||
offloaded_optim_chunk_mem += chunk.chunk_mem
|
||||
for p in params:
|
||||
grads_device_map[p] = device
|
||||
self.keep_gathered_chunk_mem = total_chunk_mem * (1 - self.shard_param_frac)
|
||||
self.keep_cuda_chunk_mem = total_chunk_mem * (1 - self.offload_param_frac)
|
||||
|
||||
|
||||
class AutoPlacementPolicy(PlacementPolicy):
|
||||
|
||||
need_mem_stats: bool = True
|
||||
# model data will use 1-_warmup_non_model_data_ratio CUDA memory in warmup phase
|
||||
# you can set them by AutoPlacementPolicy.set_warmup_non_model_data_ratio()
|
||||
# and AutoPlacementPolicy.set_steady_cuda_cap_ratio()
|
||||
_warmup_non_model_data_ratio: float = 0.8
|
||||
_steady_cuda_cap_ratio: float = 0.9
|
||||
|
||||
def __init__(self,
|
||||
chunk_manager: ChunkManager,
|
||||
mem_stats_collector: Optional[ChunkMemStatsCollector] = None) -> None:
|
||||
mem_stats_collector: Optional[ChunkMemStatsCollector] = None,
|
||||
warmup_non_model_data_ratio: float = 0.8,
|
||||
steady_cuda_cap_ratio: float = 0.9,
|
||||
**kwargs) -> None:
|
||||
super().__init__(chunk_manager, mem_stats_collector=mem_stats_collector)
|
||||
# model data will use 1-_warmup_non_model_data_ratio CUDA memory in warmup phase
|
||||
# you can set them via the constructor arguments warmup_non_model_data_ratio
|
||||
# and steady_cuda_cap_ratio
|
||||
self._warmup_non_model_data_ratio = warmup_non_model_data_ratio
|
||||
self._steady_cuda_cap_ratio = steady_cuda_cap_ratio
|
||||
|
||||
def evict_tensors(self,
|
||||
can_evict_chunks: List[Chunk],
|
||||
|
@ -105,11 +138,11 @@ class AutoPlacementPolicy(PlacementPolicy):
|
|||
used_cuda_model_data = self.chunk_manager.total_mem['cuda']
|
||||
if warmup:
|
||||
# We designate a part of CUDA memory for model data in warmup iterations.
|
||||
max_cuda_non_model_data_per_period = cuda_capacity * AutoPlacementPolicy._warmup_non_model_data_ratio
|
||||
max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio
|
||||
else:
|
||||
# max non-model-data cuda memory consumption of this sampling moment and the next sampling moment.
|
||||
max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage('cuda')
|
||||
cuda_capacity *= AutoPlacementPolicy._steady_cuda_cap_ratio
|
||||
cuda_capacity *= self._steady_cuda_cap_ratio
|
||||
total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period
|
||||
avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data
|
||||
freed_cuda_model_data = 0
|
||||
|
@ -145,89 +178,22 @@ class AutoPlacementPolicy(PlacementPolicy):
|
|||
next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True)
|
||||
return [t for (t, idx) in next_compute_idx]
|
||||
|
||||
@staticmethod
|
||||
def set_warmup_non_model_data_ratio(ratio: float) -> None:
|
||||
ratio = float(ratio)
|
||||
assert 0.0 < ratio < 1.0
|
||||
AutoPlacementPolicy._warmup_non_model_data_ratio = ratio
|
||||
|
||||
@staticmethod
|
||||
def set_steady_cuda_cap_ratio(ratio: float) -> None:
|
||||
ratio = float(ratio)
|
||||
assert 0.0 < ratio < 1.0
|
||||
AutoPlacementPolicy._steady_cuda_cap_ratio = ratio
|
||||
|
||||
|
||||
class ConstPlacementPolicy(PlacementPolicy):
|
||||
|
||||
need_mem_stats: bool = False
|
||||
_accessed_memory_boundary = 512 * 1024**2
|
||||
|
||||
def __init__(self,
|
||||
chunk_manager: ChunkManager,
|
||||
mem_stats_collector: Optional[ChunkMemStatsCollector] = None) -> None:
|
||||
super().__init__(chunk_manager, mem_stats_collector=mem_stats_collector)
|
||||
|
||||
def evict_tensors(self,
|
||||
can_evict_chunks: List[Chunk],
|
||||
cuda_demand: int = 0,
|
||||
warmup: bool = True,
|
||||
compute_list: Optional[List[Tuple[Chunk, ...]]] = None,
|
||||
compute_idx: int = 0,
|
||||
**kwargs) -> Tuple[int, float]:
|
||||
"""
|
||||
See the docstrings in the class `AutoPlacementPolicy`.
|
||||
"""
|
||||
start = time()
|
||||
used_accessed_memory = self.chunk_manager.accessed_mem
|
||||
avail_accessed_memory = ConstPlacementPolicy._accessed_memory_boundary - used_accessed_memory
|
||||
freed_accessed_memory = 0
|
||||
|
||||
if avail_accessed_memory < cuda_demand:
|
||||
to_free_memory = cuda_demand - avail_accessed_memory
|
||||
to_free_chunks = can_evict_chunks
|
||||
|
||||
if not warmup:
|
||||
# sort all chunks
|
||||
to_free_chunks = self._sort_can_evict_chunks(tuple(to_free_chunks), compute_idx, tuple(compute_list))
|
||||
|
||||
for chunk in to_free_chunks:
|
||||
if freed_accessed_memory >= to_free_memory:
|
||||
break
|
||||
|
||||
self.chunk_manager.release_chunk(chunk)
|
||||
self.chunk_manager.move_chunk(chunk, torch.device('cpu'))
|
||||
freed_accessed_memory += chunk.chunk_mem
|
||||
|
||||
if freed_accessed_memory < to_free_memory:
|
||||
raise RuntimeError(f"Adjust layout failed! No enough CUDA memory! "
|
||||
f"Need {to_free_memory}, freed {freed_accessed_memory}")
|
||||
return freed_accessed_memory, time() - start
|
||||
|
||||
@staticmethod
|
||||
@functools.lru_cache(maxsize=None)
|
||||
def _sort_can_evict_chunks(can_evict_chunks: tuple, compute_idx: int, compute_list: tuple) -> list:
|
||||
next_compute_idx = {chunk: len(compute_list) for chunk in can_evict_chunks}
|
||||
for i in range(len(compute_list) - 1, compute_idx, -1):
|
||||
for chunk in compute_list[i]:
|
||||
if chunk in next_compute_idx:
|
||||
next_compute_idx[chunk] = i
|
||||
next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True)
|
||||
return [t for (t, idx) in next_compute_idx]
|
||||
|
||||
@staticmethod
|
||||
def set_const_memory_boundary(cuda_memory_mb: int) -> None:
|
||||
boundary = int(cuda_memory_mb * 1024**2)
|
||||
assert boundary > 0
|
||||
ConstPlacementPolicy._accessed_memory_boundary = boundary
|
||||
def setup_grads_device(self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor,
|
||||
torch.device]) -> None:
|
||||
for p in params:
|
||||
chunk = self.chunk_manager.get_chunk(p)
|
||||
# init offload optim settings
|
||||
# keep gathered chunks in CUDA
|
||||
if chunk.keep_gathered:
|
||||
grads_device_map[p] = get_current_device()
|
||||
else:
|
||||
grads_device_map[p] = torch.device('cpu')
|
||||
|
||||
|
||||
class PlacementPolicyFactory:
|
||||
policies: Dict[str, Type[PlacementPolicy]] = {
|
||||
'cpu': CPUPlacementPolicy,
|
||||
'cuda': CUDAPlacementPolicy,
|
||||
'auto': AutoPlacementPolicy,
|
||||
'const': ConstPlacementPolicy
|
||||
'static': StaticPlacementPolicy,
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
|
@ -239,8 +205,3 @@ class PlacementPolicyFactory:
|
|||
@staticmethod
|
||||
def get_policy_names():
|
||||
return tuple(PlacementPolicyFactory.policies.keys())
|
||||
|
||||
@staticmethod
|
||||
def get_default_device(policy_name: str) -> torch.device:
|
||||
policy_cls = PlacementPolicyFactory.create(policy_name)
|
||||
return policy_cls.get_default_device()
|
||||
|
|
|
@ -64,13 +64,13 @@ def get_static_torch_model(zero_ddp_model,
|
|||
device=torch.device("cpu"),
|
||||
dtype=torch.float32,
|
||||
only_rank_0=True) -> torch.nn.Module:
|
||||
"""Get a static torch.nn.Module model from the given ZeroDDP module.
|
||||
You should notice that the original ZeroDDP model is not modified.
|
||||
"""Get a static torch.nn.Module model from the given GeminiDDP module.
|
||||
Note that the original GeminiDDP model is not modified.
|
||||
Thus, you can use the original model in further training.
|
||||
But you should not use the returned torch model for training, as this can cause unexpected errors.
|
||||
|
||||
Args:
|
||||
zero_ddp_model (ZeroDDP): a zero ddp model
|
||||
zero_ddp_model (GeminiDDP): a zero ddp model
|
||||
device (torch.device): the device of the final torch model
|
||||
dtype (torch.dtype): the dtype of the final torch model
|
||||
only_rank_0 (bool): if True, only rank0 has the converted torch model
|
||||
|
@ -78,8 +78,8 @@ def get_static_torch_model(zero_ddp_model,
|
|||
Returns:
|
||||
torch.nn.Module: a static torch model used for saving checkpoints or numeric checks
|
||||
"""
|
||||
from colossalai.zero.gemini.gemini_ddp import ZeroDDP
|
||||
assert isinstance(zero_ddp_model, ZeroDDP)
|
||||
from colossalai.zero.gemini.gemini_ddp import GeminiDDP
|
||||
assert isinstance(zero_ddp_model, GeminiDDP)
|
||||
|
||||
state_dict = zero_ddp_model.state_dict(only_rank_0=only_rank_0)
|
||||
colo_model = zero_ddp_model.module
|
||||
|
|
|
@ -57,8 +57,8 @@ class GradientStore(BaseStore):
|
|||
self._grads_of_params[group_id][param_id].append(grad)
|
||||
|
||||
def add_gradients_by_param_id(self, grad: Tensor, grad_idx: int, group_id: int, param_id: int):
|
||||
"""For old gradient accumulation, not in use now.
|
||||
Add a gradient slice on an existing slice of the parameter's gradient
|
||||
"""Add a gradient slice on an existing slice of the parameter's gradient
|
||||
Used when no_sync is not activated.
|
||||
|
||||
Args:
|
||||
grad (Tensor): The split gradient to append to list
|
||||
|
|
|
@ -80,9 +80,6 @@ class LowLevelZeroOptimizer(OptimizerWrapper):
|
|||
tp_process_group: Optional[ProcessGroup] = None, # if using tp
|
||||
forced_dtype: Optional[torch.dtype] = None):
|
||||
|
||||
# TODO:
|
||||
# 1. state_dict for checkpoint IO
|
||||
|
||||
super(LowLevelZeroOptimizer, self).__init__(optim=optimizer)
|
||||
self._dtype = self.optim.param_groups[0]['params'][0].dtype
|
||||
self._logger = get_dist_logger()
|
||||
|
@ -277,7 +274,11 @@ class LowLevelZeroOptimizer(OptimizerWrapper):
|
|||
sync_tensor(flat_grads_per_rank[rank], grad_list)
|
||||
for grad in grad_list:
|
||||
param_id = self._bucket_store.get_param_id_of_grad(grad)
|
||||
self._grad_store.append_gradients_by_param_id(grad, group_id, param_id)
|
||||
if len(self._grad_store.get_partitioned_gradients_by_param_id(group_id,
|
||||
param_id)) < self._world_size:
|
||||
self._grad_store.append_gradients_by_param_id(grad, group_id, param_id)
|
||||
else:
|
||||
self._grad_store.add_gradients_by_param_id(grad, rank, group_id, param_id)
|
||||
|
||||
else:
|
||||
flat_grads_list = list(flat_grads.split(len(flat_grads) // self._world_size))
|
||||
|
@ -291,7 +292,10 @@ class LowLevelZeroOptimizer(OptimizerWrapper):
|
|||
sync_tensor(recieved_grad, grad_in_bucket_current_rank)
|
||||
for grad in grad_in_bucket_current_rank:
|
||||
param_id = self._bucket_store.get_param_id_of_grad(grad)
|
||||
self._grad_store.append_gradients_by_param_id(grad, group_id, param_id)
|
||||
if len(self._grad_store.get_partitioned_gradients_by_param_id(group_id, param_id)) < 1:
|
||||
self._grad_store.append_gradients_by_param_id(grad, group_id, param_id)
|
||||
else:
|
||||
self._grad_store.add_gradients_by_param_id(grad, 0, group_id, param_id)
|
||||
|
||||
self._bucket_store.reset()
|
||||
|
||||
|
@ -303,7 +307,7 @@ class LowLevelZeroOptimizer(OptimizerWrapper):
|
|||
# or got a grad of param from another group
|
||||
# after reduction, the bucket will be empty
|
||||
if self._bucket_store.num_elements_in_bucket() + param_size > self._reduce_bucket_size or \
|
||||
group_id != self._bucket_store.current_group_id:
|
||||
group_id != self._bucket_store.current_group_id:
|
||||
self._run_reduction()
|
||||
|
||||
padding_size = self._param_store.get_param_padding_size(param)
|
||||
|
@ -315,7 +319,8 @@ class LowLevelZeroOptimizer(OptimizerWrapper):
|
|||
|
||||
def backward(self, loss, retain_graph=False):
|
||||
assert not(self._partition_grads and not self.require_grad_sync), \
|
||||
"ZeRO2(partition_grads) and gradient accumulation(no_sync) are not compatible"
|
||||
"ZeRO2(partition_grads) and no_sync are not compatible"
|
||||
|
||||
if self.mixed_precision_mixin is not None:
|
||||
loss = self.mixed_precision_mixin.pre_backward(loss)
|
||||
|
||||
|
@ -537,9 +542,12 @@ class LowLevelZeroOptimizer(OptimizerWrapper):
|
|||
for k, v in state.items():
|
||||
if isinstance(v, torch.Tensor) and k != 'step':
|
||||
working_param = self._param_store.master_to_working_param[id(param)]
|
||||
gather_tensor = [torch.zeros_like(v) for _ in range(self._world_size)]
|
||||
dist.all_gather(gather_tensor, v, group=self.dp_pg)
|
||||
param_state = torch.stack(gather_tensor).view(-1)[:working_param.numel()].reshape_as(working_param)
|
||||
gather_tensor = [
|
||||
torch.zeros(v.shape, device='cuda', dtype=v.dtype) for _ in range(self._world_size)
|
||||
]
|
||||
dist.all_gather(gather_tensor, v.cuda(), group=self.dp_pg)
|
||||
param_state = torch.stack(gather_tensor).view(-1)[:working_param.numel()].reshape_as(
|
||||
working_param).cpu()
|
||||
zero_state[param][k] = param_state
|
||||
|
||||
states_dict = self._pack_state(zero_state)
|
||||
|
@ -562,10 +570,9 @@ class LowLevelZeroOptimizer(OptimizerWrapper):
|
|||
if padding_size > 0:
|
||||
v = torch.nn.functional.pad(v, [0, padding_size])
|
||||
v_list = v.split(v.numel() // self._world_size)
|
||||
zero_state_dict['state'][param_idx][k] = v_list[self._local_rank].detach()
|
||||
zero_state_dict['state'][param_idx][k] = v_list[self._local_rank].detach().clone()
|
||||
|
||||
self.optim.load_state_dict(zero_state_dict)
|
||||
zero_state_dict = dict()
|
||||
|
||||
def state_dict_shard(self, max_shard_size: int = 1024) -> Iterator[Tuple[Dict, int]]:
|
||||
"""Returns dictionaries containing a whole state of the module one by one. The max size of dictionary shard is specified by ``max_shard_size``.
|
||||
|
@ -594,9 +601,10 @@ class LowLevelZeroOptimizer(OptimizerWrapper):
|
|||
|
||||
for k, v in states.items():
|
||||
if isinstance(v, torch.Tensor) and k != 'step':
|
||||
state_tensor = [torch.zeros_like(v) for _ in range(self._world_size)]
|
||||
dist.all_gather(state_tensor, v, group=self.dp_pg)
|
||||
state_tensor = torch.stack(state_tensor).view(-1)[:working_param.numel()].reshape_as(working_param)
|
||||
state_tensor = [torch.zeros(v.shape, device='cuda', dtype=v.dtype) for _ in range(self._world_size)]
|
||||
dist.all_gather(state_tensor, v.cuda(), group=self.dp_pg)
|
||||
state_tensor = torch.stack(state_tensor).view(-1)[:working_param.numel()].reshape_as(
|
||||
working_param).cpu()
|
||||
current_block_size += state_tensor.numel()
|
||||
current_block[k] = state_tensor
|
||||
|
||||
|
|
|
@ -1,5 +1,41 @@
|
|||
# Low Level ZeRO
|
||||
>Low Level ZeRO == ZeRO-DP stage 1 and 2; we denote it as ZeRO below.
|
||||
## Examples of ZeRO and gradient accumulation
|
||||
|
||||
The code below only shows a typical gradient accumulation process; it omits many details, such as how the loss is processed.
|
||||
|
||||
```python
|
||||
# examples of ZeRO1 with gradient accumulation
|
||||
...
|
||||
outputs = model(input)
|
||||
loss = SomeLoss(outputs)
|
||||
if (idx + 1) % ACCUMULATE_STEP != 0:
|
||||
with booster.no_sync(model, optimizer):
|
||||
# under this context, gradients are not synchronized during backward,
|
||||
# leaving each rank with different (unsynced) gradients.
|
||||
# This saves communication time during backward
|
||||
booster.backward(loss, optimizer)
|
||||
continue
|
||||
else:
|
||||
# need to sync all the accumulated gradients
|
||||
booster.backward(loss, optimizer)
|
||||
optimizer.step()
|
||||
...
|
||||
```
|
||||
|
||||
```python
|
||||
# example of ZeRO2 with gradient accumulation
|
||||
|
||||
...
|
||||
outputs = model(input)
|
||||
loss = SomeLoss(outputs)
|
||||
# ZeRO2 partitions the gradients, so they can NOT be accumulated without syncing (no_sync is unsupported).
|
||||
booster.backward(loss, optimizer)
|
||||
if (idx + 1) % ACCUMULATE_STEP == 0:
|
||||
optimizer.step()
|
||||
...
|
||||
```
|
||||
|
||||
|
||||
## Design
|
||||
### Notion
|
||||
|
@ -25,11 +61,11 @@ The data structure looks like this:
|
|||
```
|
||||
After that, the gradients would be flattened by rank, and the data structure looks like this:
|
||||
```
|
||||
# g-0 means flatten([g-00, g-10])
|
||||
# g-X0 means flatten([g-00, g-10])
|
||||
{
|
||||
0: [g-0],
|
||||
1: [g-1],
|
||||
2: [g-2]
|
||||
0: [g-X0],
|
||||
1: [g-X1],
|
||||
2: [g-X2]
|
||||
}
|
||||
```
|
||||
For ZeRO-1, we iterate over the dictionary and `all_reduce` each flattened tensor. For ZeRO-2, we can just `reduce-scatter`.
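
As a rough illustration of these two reduction paths, here is a minimal sketch under assumptions (it is not the actual `LowLevelZeroOptimizer` implementation; it assumes `torch.distributed` is already initialized and that `flat_grads_per_rank` holds the per-rank flattened tensors shown above):

```python
import torch
import torch.distributed as dist


def reduce_flat_grads(flat_grads_per_rank: dict, partition_grads: bool):
    # flat_grads_per_rank: {rank: flattened gradient tensor}, i.e. g-X0, g-X1, ...
    world_size = dist.get_world_size()
    if not partition_grads:
        # ZeRO-1: every rank keeps full gradients, so each flattened tensor
        # is all-reduced across the data-parallel group.
        for flat_grad in flat_grads_per_rank.values():
            dist.all_reduce(flat_grad)
        return flat_grads_per_rank
    # ZeRO-2: each rank only keeps its own gradient shard, so a single
    # reduce-scatter delivers the reduced shard this rank owns.
    rank = dist.get_rank()
    my_shard = torch.empty_like(flat_grads_per_rank[rank])
    dist.reduce_scatter(my_shard, [flat_grads_per_rank[r] for r in range(world_size)])
    return my_shard
```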
|
||||
|
|
|
@ -109,6 +109,6 @@ def zero_optim_wrapper(model: nn.Module,
|
|||
config_dict['clip_grad_norm'] = max_norm
|
||||
return LowLevelZeroOptimizer(optimizer, **config_dict, verbose=verbose)
|
||||
else:
|
||||
from colossalai.zero.gemini.gemini_optimizer import ZeroOptimizer
|
||||
from colossalai.zero.gemini.gemini_optimizer import GeminiOptimizer
|
||||
config_dict['clipping_norm'] = max_norm
|
||||
return ZeroOptimizer(optimizer, model, **config_dict, verbose=verbose)
|
||||
return GeminiOptimizer(optimizer, model, **config_dict, verbose=verbose)
|
||||
|
|
|
@ -18,7 +18,7 @@ RUN apt-get update && \
|
|||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# install torch
|
||||
RUN conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch
|
||||
RUN conda install -y pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch
|
||||
|
||||
# install ninja
|
||||
RUN apt-get update && \
|
||||
|
@ -43,8 +43,9 @@ RUN git clone -b ${VERSION} https://github.com/hpcaitech/ColossalAI.git \
|
|||
RUN pip install --no-cache-dir titans
|
||||
|
||||
# install tensornvme
|
||||
RUN conda install cmake && \
|
||||
RUN conda install -y cmake && \
|
||||
git clone https://github.com/hpcaitech/TensorNVMe.git && \
|
||||
cd TensorNVMe && \
|
||||
apt update -y && apt install -y libaio-dev && \
|
||||
pip install -r requirements.txt && \
|
||||
pip install -v --no-cache-dir .
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
</div>
|
||||
|
||||
## 新闻
|
||||
* [2023/09] [70 Billion Parameter LLaMA2 Model Training Accelerated by 195%](https://www.hpc-ai.tech/blog/70b-llama2-training)
|
||||
* [2023/07] [HPC-AI Tech Raises 22 Million USD in Series A Funding](https://www.hpc-ai.tech/blog/hpc-ai-tech-raises-22-million-usd-in-series-a-funding-to-fuel-team-expansion-and-business-growth)
|
||||
* [2023/07] [65B Model Pretraining Accelerated by 38%, Best Practices for Building LLaMA-Like Base Models Open-Source](https://www.hpc-ai.tech/blog/large-model-pretraining)
|
||||
* [2023/03] [ColossalChat: An Open-Source Solution for Cloning ChatGPT With a Complete RLHF Pipeline](https://medium.com/@yangyou_berkeley/colossalchat-an-open-source-solution-for-cloning-chatgpt-with-a-complete-rlhf-pipeline-5edf08fb538b)
|
||||
|
@ -49,7 +50,7 @@
|
|||
<li>
|
||||
<a href="#并行训练样例展示">并行训练样例展示</a>
|
||||
<ul>
|
||||
<li><a href="#LLaMA">LLaMA</a></li>
|
||||
<li><a href="#LLaMA2">LLaMA 1/2</a></li>
|
||||
<li><a href="#GPT-3">GPT-3</a></li>
|
||||
<li><a href="#GPT-2">GPT-2</a></li>
|
||||
<li><a href="#BERT">BERT</a></li>
|
||||
|
@ -210,7 +211,16 @@ Colossal-AI 为您提供了一系列并行组件。我们的目标是让您的
|
|||
<p align="right">(<a href="#top">返回顶端</a>)</p>
|
||||
|
||||
## 并行训练样例展示
|
||||
### LLaMA
|
||||
### LLaMA2
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/llama2_pretraining.png" width=600/>
|
||||
</p>
|
||||
|
||||
- 700亿参数LLaMA2训练加速195%
|
||||
[[code]](https://github.com/hpcaitech/ColossalAI/tree/example/llama/examples/language/llama)
|
||||
[[blog]](https://www.hpc-ai.tech/blog/70b-llama2-training)
|
||||
|
||||
### LLaMA1
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/examples/images/LLaMA_pretraining.png" width=600/>
|
||||
</p>
|
||||
|
|
|
@ -54,32 +54,38 @@ We also provide a lightweight chunk search mechanism to help users automatically
|
|||
|
||||
We will use `GeminiDDP` to enable ZeRO with chunk-based memory management. This is our new torch.Module wrapper that combines ZeRO-DP and Gemini: ZeRO provides the parallelism, and Gemini handles memory management.
|
||||
|
||||
Also Make sure that your model is initialized under the context of ColoInitContext.
|
||||
Gemini supports LazyInitContext, which can save memory when initializing large models on multiple GPUs.
|
||||
|
||||
If your model has `N` billion parameters and your GPU memory is `M` GB, we recommend using LazyInitContext when `4N >= M`, since the fp32 weights alone already take about `4N` GB; otherwise, LazyInitContext is optional. A quick sanity check of this rule follows the code block below.
|
||||
|
||||
<!--- doc-test-ignore-start -->
|
||||
```python
|
||||
with ColoInitContext(device='cpu', default_dist_spec=default_dist_spec, default_pg=default_pg):
|
||||
with LazyInitContext(default_device=torch.device('cuda')):
|
||||
model = gpt2_medium(checkpoint=True)
|
||||
```
|
||||
<!--- doc-test-ignore-end -->
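
As a quick sanity check of the `4N >= M` rule of thumb above, here is a minimal sketch with purely illustrative numbers (it is not part of any API):

<!--- doc-test-ignore-start -->
```python
# fp32 weights take ~4 bytes per parameter, so N billion parameters need
# roughly 4N GB before gradients or optimizer states are even allocated.
n_billion_params = 7.0    # illustrative: a 7B-parameter model
gpu_memory_gb = 24.0      # illustrative: a 24 GB GPU
if 4 * n_billion_params >= gpu_memory_gb:
    print("LazyInitContext is recommended")    # 28 GB of weights alone exceed 24 GB
else:
    print("LazyInitContext is optional")
```
<!--- doc-test-ignore-end -->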
|
||||
|
||||
Define the model parameters as follows:
|
||||
We provide the user-friendly `Booster` API and recommend that you use it. If you still want to use the low-level API, you can read the rest of this section.
|
||||
|
||||
Wrap the model with `GeminiDDP`.
|
||||
|
||||
<!--- doc-test-ignore-start -->
|
||||
```python
|
||||
chunk_manager = init_chunk_manager(model=module,
|
||||
init_device=device,
|
||||
hidden_dim=hidden_dim,
|
||||
search_range_m=search_range_m,
|
||||
min_chunk_size_m=min_chunk_size_m)
|
||||
gemini_manager = GeminiManager(placement_policy, chunk_manager)
|
||||
model = GeminiDDP(model, hidden_dim=hidden_dim, min_chunk_size_m=min_chunk_size_m)
|
||||
```
|
||||
<!--- doc-test-ignore-end -->
|
||||
|
||||
`hidden_dim` is the hidden dimension of the DNN. Users can provide this argument to speed up the chunk-size search; if you do not know it before training, that is fine and a default value of 1024 will be used. `min_chunk_size_m` is a float giving the minimum chunk size divided by 2^20 (e.g., if `min_chunk_size_m=2.5`, the minimum chunk size is 2.5*(2^20)). If the aggregate size of parameters is still smaller than the minimum chunk size, all parameters will be compacted into one small chunk.
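
For concreteness, a minimal sketch with illustrative values (the unit convention follows the description above, so `min_chunk_size_m=2.5` means a minimum chunk size of 2.5*(2^20)):

<!--- doc-test-ignore-start -->
```python
# Illustrative values only.
model = GeminiDDP(model,
                  hidden_dim=1024,        # a known hidden size speeds up the chunk-size search
                  min_chunk_size_m=2.5)   # minimum chunk size = 2.5 * 2**20
```
<!--- doc-test-ignore-end -->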
|
||||
|
||||
Initialization of the optimizer.
|
||||
<!--- doc-test-ignore-start -->
|
||||
```python
|
||||
optimizer = GeminiAdamOptimizer(model, lr=1e-3, initial_scale=2**5)
|
||||
```
|
||||
<!--- doc-test-ignore-end -->
|
||||
|
||||
Training
|
||||
<!--- doc-test-ignore-start -->
|
||||
```python
|
||||
optimizer.zero_grad()
|
||||
outputs = model(input_ids, attn_mask)
|
||||
|
@ -87,6 +93,7 @@ loss = criterion(outputs, input_ids)
|
|||
optimizer.backward(loss)
|
||||
optimizer.step()
|
||||
```
|
||||
<!--- doc-test-ignore-end -->
|
||||
> ⚠️ Note: Please do not use `loss.backward()`; the standard way is `optimizer.backward(loss)`.
|
||||
|
||||
### Train GPT
|
||||
|
@ -142,46 +149,6 @@ class GPTLMLoss(nn.Module):
|
|||
return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
||||
```
|
||||
|
||||
Define tensor parallel and parameter sharding strategies for tensor parallelism:
|
||||
|
||||
```python
|
||||
def tensor_parallelize(model: torch.nn.Module, pg: ProcessGroup):
|
||||
for mn, module in model.named_modules():
|
||||
for pn, param in module.named_parameters(recurse=False):
|
||||
if hasattr(param, 'visited'):
|
||||
continue
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
if 'mlp.c_fc' in mn:
|
||||
if 'weight' in pn or 'bias' in pn:
|
||||
split_param_col_tp1d(param, pg)
|
||||
param.compute_spec.set_output_replicate(False)
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
elif 'mlp.c_proj' in mn:
|
||||
if 'weight' in pn:
|
||||
split_param_row_tp1d(param, pg)
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
elif 'wte' in mn or 'wpe' in mn:
|
||||
split_param_col_tp1d(param, pg)
|
||||
elif 'c_attn' in mn or 'c_proj' in mn:
|
||||
split_param_col_tp1d(param, pg)
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
|
||||
param.visited = True
|
||||
def split_param_single_dim_tp1d(dim: int, param: ColoParameter, pg: ProcessGroup):
|
||||
spec = (ShardSpec([dim], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
|
||||
param.set_tensor_spec(*spec)
|
||||
|
||||
|
||||
def split_param_row_tp1d(param: ColoParameter, pg: ProcessGroup):
|
||||
split_param_single_dim_tp1d(0, param, pg)
|
||||
|
||||
|
||||
def split_param_col_tp1d(param: ColoParameter, pg: ProcessGroup):
|
||||
split_param_single_dim_tp1d(-1, param, pg)
|
||||
```
|
||||
|
||||
Write a function to get random inputs:
|
||||
|
||||
|
@ -198,7 +165,7 @@ Finally, we define a model which uses Gemini + ZeRO DDP and define our training
|
|||
from colossalai.nn.optimizer import HybridAdam
|
||||
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.zero import ColoInitContext
|
||||
from colossalai.lazy import LazyInitContext
|
||||
from colossalai.booster.plugin import GeminiPlugin
|
||||
|
||||
def main():
|
||||
|
@ -214,17 +181,13 @@ def main():
|
|||
optimizer = HybridAdam(model.parameters(), lr=0.001)
|
||||
|
||||
torch.manual_seed(123)
|
||||
default_pg = ProcessGroup(tp_degree=args.tp_degree)
|
||||
default_dist_spec = ShardSpec([-1], [args.tp_degree])
|
||||
# build GPT model
|
||||
with ColoInitContext(device='cpu', default_dist_spec=default_dist_spec, default_pg=default_pg):
|
||||
with ColoInitContext(default_device=torch.device('cuda')):
|
||||
model = gpt2_medium(checkpoint=True)
|
||||
pg = default_pg
|
||||
# Tensor Parallelism (TP)
|
||||
tensor_parallelize(model, pg)
|
||||
|
||||
# Gemini + ZeRO DP, Note it must be used after TP
|
||||
plugin = GeminiPlugin(placement_policy='cuda', max_norm=1.0, initial_scale=2**5)
|
||||
|
||||
# Gemini + ZeRO DP
|
||||
plugin = GeminiPlugin(max_norm=1.0, initial_scale=2**5)
|
||||
booster = Booster(plugin=plugin)
|
||||
model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
|
||||
|
||||
|
|
|
@ -53,32 +53,37 @@
|
|||
|
||||
我们将运用`GeminiDDP`的方式来使用基于Chunk内存管理的ZeRO。这是我们新包装的torch.Module ,它使用 ZeRO-DP 和 Gemini,其中ZeRO 用于并行,Gemini 用于内存管理。
|
||||
|
||||
同样需要确保你的模型是在 `ColoInitContext` 的上下文中初始化的。
|
||||
Gemini支持惰性初始化, 它可以节省多卡初始化大模型时的显存使用.
|
||||
|
||||
如果你的模型有 `N` billion 个参数,你的 GPU 内存为 `M` GB, 当 `4N >= M` 时,我们推荐使用 LazyInitContext。否则,LazyInitContext 是可选的。
|
||||
|
||||
<!--- doc-test-ignore-start -->
|
||||
```python
|
||||
with ColoInitContext(device='cpu', default_dist_spec=default_dist_spec, default_pg=default_pg):
|
||||
with LazyInitContext(default_device=torch.device('cuda')):
|
||||
model = gpt2_medium(checkpoint=True)
|
||||
```
|
||||
<!--- doc-test-ignore-end -->
|
||||
|
||||
定义模型参数如下:
|
||||
我们提供了 `Booster` API,它用户友好。我们推荐你使用 `Booster` API。如果您仍然想使用底层 API,您可以继续阅读本节其他内容。
|
||||
|
||||
使用 `GeminiDDP` 包装模型。
|
||||
|
||||
<!--- doc-test-ignore-start -->
|
||||
```python
|
||||
chunk_manager = init_chunk_manager(model=module,
|
||||
init_device=device,
|
||||
hidden_dim=hidden_dim,
|
||||
search_range_m=search_range_m,
|
||||
min_chunk_size_m=min_chunk_size_m)
|
||||
gemini_manager = GeminiManager(placement_policy, chunk_manager)
|
||||
model = ZeroDDP(model, gemini_manager)
|
||||
model = GeminiDDP(model, hidden_dim=hidden_dim, min_chunk_size_m=min_chunk_size_m)
|
||||
```
|
||||
<!--- doc-test-ignore-end -->
|
||||
|
||||
`hidden dim`是DNN的隐藏维度。用户可以提供这个参数来加快搜索速度。如果用户在训练前不知道这个参数也可以。 我们将使用默认值 1024。`min_chunk_size_m`是以兆(2^20)为单位的最小块大小。如果参数的总大小仍然小于最小块大小,则所有参数将被压缩为一个小块。
|
||||
|
||||
初始化优化器。
|
||||
<!--- doc-test-ignore-start -->
|
||||
```python
|
||||
optimizer = GeminiAdamOptimizer(model, lr=1e-3, initial_scale=2**5)
|
||||
```
|
||||
<!--- doc-test-ignore-end -->
|
||||
|
||||
<!--- doc-test-ignore-start -->
|
||||
训练
|
||||
```python
|
||||
optimizer.zero_grad()
|
||||
|
@ -87,6 +92,7 @@ loss = criterion(outputs, input_ids)
|
|||
optimizer.backward(loss)
|
||||
optimizer.step()
|
||||
```
|
||||
<!--- doc-test-ignore-end -->
|
||||
> ⚠️ 注意:请不要使用`loss.backward()`,规范写法是`optimizer.backward(loss)`。
|
||||
|
||||
### 训练GPT
|
||||
|
@ -143,47 +149,6 @@ class GPTLMLoss(nn.Module):
|
|||
return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
||||
```
|
||||
|
||||
定义张量并行和参数分片策略:
|
||||
|
||||
```python
|
||||
def tensor_parallelize(model: torch.nn.Module, pg: ProcessGroup):
|
||||
for mn, module in model.named_modules():
|
||||
for pn, param in module.named_parameters(recurse=False):
|
||||
if hasattr(param, 'visited'):
|
||||
continue
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
if 'mlp.c_fc' in mn:
|
||||
if 'weight' in pn or 'bias' in pn:
|
||||
split_param_col_tp1d(param, pg)
|
||||
param.compute_spec.set_output_replicate(False)
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
elif 'mlp.c_proj' in mn:
|
||||
if 'weight' in pn:
|
||||
split_param_row_tp1d(param, pg)
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
elif 'wte' in mn or 'wpe' in mn:
|
||||
split_param_col_tp1d(param, pg)
|
||||
elif 'c_attn' in mn or 'c_proj' in mn:
|
||||
split_param_col_tp1d(param, pg)
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
|
||||
param.visited = True
|
||||
def split_param_single_dim_tp1d(dim: int, param: ColoParameter, pg: ProcessGroup):
|
||||
spec = (ShardSpec([dim], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
|
||||
param.set_tensor_spec(*spec)
|
||||
|
||||
|
||||
def split_param_row_tp1d(param: ColoParameter, pg: ProcessGroup):
|
||||
split_param_single_dim_tp1d(0, param, pg)
|
||||
|
||||
|
||||
def split_param_col_tp1d(param: ColoParameter, pg: ProcessGroup):
|
||||
split_param_single_dim_tp1d(-1, param, pg)
|
||||
```
|
||||
|
||||
写一个获得随机输入的函数:
|
||||
|
||||
```python
|
||||
|
@ -200,7 +165,7 @@ def get_data(batch_size, seq_len, vocab_size):
|
|||
from colossalai.nn.optimizer import HybridAdam
|
||||
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.zero import ColoInitContext
|
||||
from colossalai.lazy import LazyInitContext
|
||||
from colossalai.booster.plugin import GeminiPlugin
|
||||
|
||||
def main():
|
||||
|
@ -216,17 +181,13 @@ def main():
|
|||
optimizer = HybridAdam(model.parameters(), lr=0.001)
|
||||
|
||||
torch.manual_seed(123)
|
||||
default_pg = ProcessGroup(tp_degree=args.tp_degree)
|
||||
default_dist_spec = ShardSpec([-1], [args.tp_degree])
|
||||
# build GPT model
|
||||
with ColoInitContext(device='cpu', default_dist_spec=default_dist_spec, default_pg=default_pg):
|
||||
with ColoInitContext(default_device=torch.device('cuda')):
|
||||
model = gpt2_medium(checkpoint=True)
|
||||
pg = default_pg
|
||||
# Tensor Parallelism (TP)
|
||||
tensor_parallelize(model, pg)
|
||||
|
||||
# Gemini + ZeRO DP, Note it must be used after TP
|
||||
plugin = GeminiPlugin(placement_policy='cuda', max_norm=1.0, initial_scale=2**5)
|
||||
|
||||
# Gemini + ZeRO DP
|
||||
plugin = GeminiPlugin(max_norm=1.0, initial_scale=2**5)
|
||||
booster = Booster(plugin=plugin)
|
||||
model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ from colossalai.nn.parallel import GeminiDDP, zero_model_wrapper, zero_optim_wra
|
|||
from colossalai.tensor import ColoParameter, ComputePattern, ComputeSpec, ProcessGroup, ReplicaSpec, ShardSpec
|
||||
from colossalai.utils import get_current_device
|
||||
from colossalai.utils.model.colo_init_context import ColoInitContext
|
||||
from colossalai.zero import ZeroOptimizer
|
||||
from colossalai.zero import GeminiOptimizer
|
||||
|
||||
|
||||
def main():
|
||||
|
@ -46,7 +46,7 @@ def main():
|
|||
args.local_rank = -1
|
||||
args.log_interval = 1
|
||||
else:
|
||||
colossalai.launch_from_torch(config={}) #args.colossal_config
|
||||
colossalai.launch_from_torch(config={}) # args.colossal_config
|
||||
args.local_rank = int(os.environ["LOCAL_RANK"])
|
||||
logger.info(
|
||||
f'launch_from_torch, world size: {torch.distributed.get_world_size()} | ' +
|
||||
|
@ -123,7 +123,8 @@ def main():
|
|||
get_tflops_func = partial(get_tflops, numel, args.train_micro_batch_size_per_gpu, args.max_seq_length)
|
||||
|
||||
# 144003367 is the length of the entire dataset
|
||||
steps_per_epoch = 144003367 // world_size // args.train_micro_batch_size_per_gpu // args.gradient_accumulation_steps // args.refresh_bucket_size #len(dataloader)
|
||||
# len(dataloader)
|
||||
steps_per_epoch = 144003367 // world_size // args.train_micro_batch_size_per_gpu // args.gradient_accumulation_steps // args.refresh_bucket_size
|
||||
total_steps = steps_per_epoch * args.epoch
|
||||
|
||||
lr_scheduler = get_lr_scheduler(optimizer, total_steps=total_steps, last_epoch=-1)
|
||||
|
|
|
@ -7,7 +7,7 @@ imageio-ffmpeg==0.4.2
|
|||
torchmetrics==0.7
|
||||
omegaconf==2.1.1
|
||||
test-tube>=0.7.5
|
||||
streamlit>=0.73.1
|
||||
streamlit>=1.11.1
|
||||
einops==0.3.0
|
||||
transformers
|
||||
webdataset==0.2.5
|
||||
|
|
|
@ -20,6 +20,5 @@ for plugin in "gemini"; do
|
|||
--lr_scheduler="constant" \
|
||||
--lr_warmup_steps=0 \
|
||||
--test_run=True \
|
||||
--num_class_images=200 \
|
||||
--placement="auto" # "cuda"
|
||||
--num_class_images=200
|
||||
done
|
||||
|
|
|
@ -2,9 +2,9 @@ import argparse
|
|||
import hashlib
|
||||
import math
|
||||
import os
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
import shutil
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
@ -19,6 +19,8 @@ from tqdm.auto import tqdm
|
|||
from transformers import AutoTokenizer, PretrainedConfig
|
||||
|
||||
import colossalai
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
from colossalai.context.parallel_mode import ParallelMode
|
||||
from colossalai.core import global_context as gpc
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
|
@ -26,8 +28,6 @@ from colossalai.nn.optimizer import HybridAdam
|
|||
from colossalai.utils import get_current_device
|
||||
from colossalai.zero import ColoInitContext
|
||||
from colossalai.zero.gemini import get_static_torch_model
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
|
||||
disable_existing_loggers()
|
||||
logger = get_dist_logger()
|
||||
|
@ -138,10 +138,10 @@ def parse_args(input_args=None):
|
|||
" resolution"),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--placement",
|
||||
type=str,
|
||||
default="cpu",
|
||||
help="Placement Policy for Gemini. Valid when using colossalai as dist plan.",
|
||||
"--offload_optim_frac",
|
||||
type=float,
|
||||
default=1.0,
|
||||
help="Fraction of optimizer states to be offloaded. Valid when using colossalai as dist plan.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--center_crop",
|
||||
|
@ -461,18 +461,17 @@ def main(args):
|
|||
revision=args.revision,
|
||||
)
|
||||
|
||||
|
||||
if args.externel_unet_path is None:
|
||||
logger.info(f"Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}", ranks=[0])
|
||||
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path,
|
||||
subfolder="unet",
|
||||
revision=args.revision,
|
||||
low_cpu_mem_usage=False)
|
||||
subfolder="unet",
|
||||
revision=args.revision,
|
||||
low_cpu_mem_usage=False)
|
||||
else:
|
||||
logger.info(f"Loading UNet2DConditionModel from {args.externel_unet_path}", ranks=[0])
|
||||
unet = UNet2DConditionModel.from_pretrained(args.externel_unet_path,
|
||||
revision=args.revision,
|
||||
low_cpu_mem_usage=False)
|
||||
revision=args.revision,
|
||||
low_cpu_mem_usage=False)
|
||||
|
||||
vae.requires_grad_(False)
|
||||
text_encoder.requires_grad_(False)
|
||||
|
@ -491,30 +490,31 @@ def main(args):
|
|||
if args.plugin.startswith('torch_ddp'):
|
||||
plugin = TorchDDPPlugin()
|
||||
elif args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(placement_policy=args.placement, strict_ddp_mode=True, initial_scale=2 ** 5)
|
||||
plugin = GeminiPlugin(offload_optim_frac=args.offload_optim_frac, strict_ddp_mode=True, initial_scale=2**5)
|
||||
elif args.plugin == 'low_level_zero':
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2 ** 5)
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2**5)
|
||||
|
||||
booster = Booster(plugin=plugin, **booster_kwargs)
|
||||
|
||||
# config optimizer for colossalai zero
|
||||
optimizer = HybridAdam(unet.parameters(), lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm)
|
||||
optimizer = HybridAdam(unet.parameters(),
|
||||
lr=args.learning_rate,
|
||||
initial_scale=2**5,
|
||||
clipping_norm=args.max_grad_norm)
|
||||
|
||||
# load noise_scheduler
|
||||
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
|
||||
|
||||
# prepare dataset
|
||||
logger.info(f"Prepare dataset from {args.instance_data_dir}", ranks=[0])
|
||||
train_dataset = DreamBoothDataset(
|
||||
instance_data_root=args.instance_data_dir,
|
||||
instance_prompt=args.instance_prompt,
|
||||
class_data_root=args.class_data_dir if args.with_prior_preservation else None,
|
||||
class_prompt=args.class_prompt,
|
||||
tokenizer=tokenizer,
|
||||
size=args.resolution,
|
||||
center_crop=args.center_crop,
|
||||
test=args.test_run
|
||||
)
|
||||
train_dataset = DreamBoothDataset(instance_data_root=args.instance_data_dir,
|
||||
instance_prompt=args.instance_prompt,
|
||||
class_data_root=args.class_data_dir if args.with_prior_preservation else None,
|
||||
class_prompt=args.class_prompt,
|
||||
tokenizer=tokenizer,
|
||||
size=args.resolution,
|
||||
center_crop=args.center_crop,
|
||||
test=args.test_run)
|
||||
|
||||
def collate_fn(examples):
|
||||
input_ids = [example["instance_prompt_ids"] for example in examples]
|
||||
|
@ -690,6 +690,7 @@ def main(args):
|
|||
if args.push_to_hub:
|
||||
repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
main(args)
|
||||
|
|
|
@ -2,9 +2,9 @@ import argparse
|
|||
import hashlib
|
||||
import math
|
||||
import os
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
import shutil
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
@ -21,6 +21,8 @@ from tqdm.auto import tqdm
|
|||
from transformers import AutoTokenizer, PretrainedConfig
|
||||
|
||||
import colossalai
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
from colossalai.context.parallel_mode import ParallelMode
|
||||
from colossalai.core import global_context as gpc
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
|
@ -28,8 +30,6 @@ from colossalai.nn.optimizer import HybridAdam
|
|||
from colossalai.utils import get_current_device
|
||||
from colossalai.zero import ColoInitContext, GeminiAdamOptimizer
|
||||
from colossalai.zero.gemini import get_static_torch_model
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
|
||||
disable_existing_loggers()
|
||||
logger = get_dist_logger()
|
||||
|
@ -459,18 +459,17 @@ def main(args):
|
|||
revision=args.revision,
|
||||
)
|
||||
|
||||
|
||||
if args.externel_unet_path is None:
|
||||
logger.info(f"Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}", ranks=[0])
|
||||
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path,
|
||||
subfolder="unet",
|
||||
revision=args.revision,
|
||||
low_cpu_mem_usage=False)
|
||||
subfolder="unet",
|
||||
revision=args.revision,
|
||||
low_cpu_mem_usage=False)
|
||||
else:
|
||||
logger.info(f"Loading UNet2DConditionModel from {args.externel_unet_path}", ranks=[0])
|
||||
unet = UNet2DConditionModel.from_pretrained(args.externel_unet_path,
|
||||
revision=args.revision,
|
||||
low_cpu_mem_usage=False)
|
||||
revision=args.revision,
|
||||
low_cpu_mem_usage=False)
|
||||
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path,
|
||||
subfolder="unet",
|
||||
revision=args.revision,
|
||||
|
@ -490,8 +489,7 @@ def main(args):
|
|||
block_id = int(name[len("down_blocks.")])
|
||||
hidden_size = unet.config.block_out_channels[block_id]
|
||||
|
||||
lora_attn_procs[name] = LoRACrossAttnProcessor(hidden_size=hidden_size,
|
||||
cross_attention_dim=cross_attention_dim)
|
||||
lora_attn_procs[name] = LoRACrossAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
|
||||
|
||||
unet.set_attn_processor(lora_attn_procs)
|
||||
lora_layers = AttnProcsLayers(unet.attn_processors)
|
||||
|
@ -513,14 +511,17 @@ def main(args):
|
|||
if args.plugin.startswith('torch_ddp'):
|
||||
plugin = TorchDDPPlugin()
|
||||
elif args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, initial_scale=2 ** 5)
|
||||
plugin = GeminiPlugin(strict_ddp_mode=True, initial_scale=2**5)
|
||||
elif args.plugin == 'low_level_zero':
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2 ** 5)
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2**5)
|
||||
|
||||
booster = Booster(plugin=plugin, **booster_kwargs)
|
||||
|
||||
# config optimizer for colossalai zero
|
||||
optimizer = HybridAdam(unet.parameters(), lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm)
|
||||
optimizer = HybridAdam(unet.parameters(),
|
||||
lr=args.learning_rate,
|
||||
initial_scale=2**5,
|
||||
clipping_norm=args.max_grad_norm)
|
||||
|
||||
# load noise_scheduler
|
||||
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
|
||||
|
@ -711,6 +712,7 @@ def main(args):
|
|||
if args.push_to_hub:
|
||||
repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
main(args)
|
||||
|
|
|
@ -49,8 +49,8 @@ python eval.py -c ./ckpt-low_level_zero -e 80
|
|||
|
||||
The expected accuracy will be:
|
||||
|
||||
| Model | Single-GPU Baseline FP32 | Booster DDP with FP32 | Booster DDP with FP16 | Booster Low Level Zero |
|
||||
| --------- | ------------------------ | --------------------- | --------------------- | ---------------------- |
|
||||
| ResNet-18 | 85.85% | 84.91% | 85.46% | 84.50% |
|
||||
| Model | Single-GPU Baseline FP32 | Booster DDP with FP32 | Booster DDP with FP16 | Booster Low Level Zero | Booster Gemini |
|
||||
| --------- | ------------------------ | --------------------- | --------------------- | ---------------------- | -------------- |
|
||||
| ResNet-18 | 85.85% | 84.91% | 85.46% | 84.50% | 84.60% |
|
||||
|
||||
**Note: the baseline is adapted from the [script](https://pytorch-tutorial.readthedocs.io/en/latest/tutorial/chapter03_intermediate/3_2_2_cnn_resnet_cifar10/) to use `torchvision.models.resnet18`**
|
||||
|
|
|
@ -104,7 +104,7 @@ def main():
|
|||
'--plugin',
|
||||
type=str,
|
||||
default='torch_ddp',
|
||||
choices=['torch_ddp', 'torch_ddp_fp16', 'low_level_zero'],
|
||||
choices=['torch_ddp', 'torch_ddp_fp16', 'low_level_zero', 'gemini'],
|
||||
help="plugin to use")
|
||||
parser.add_argument('-r', '--resume', type=int, default=-1, help="resume from the epoch's checkpoint")
|
||||
parser.add_argument('-c', '--checkpoint', type=str, default='./checkpoint', help="checkpoint directory")
|
||||
|
@ -141,7 +141,7 @@ def main():
|
|||
if args.plugin.startswith('torch_ddp'):
|
||||
plugin = TorchDDPPlugin()
|
||||
elif args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, initial_scale=2**5)
|
||||
plugin = GeminiPlugin(initial_scale=2**5)
|
||||
elif args.plugin == 'low_level_zero':
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2**5)
|
||||
|
||||
|
|
|
@ -1,19 +1,18 @@
|
|||
import time
|
||||
|
||||
import torch
|
||||
import transformers
|
||||
from transformers import ViTConfig, ViTForImageClassification
|
||||
import tqdm
|
||||
import transformers
|
||||
from args import parse_benchmark_args
|
||||
from transformers import ViTConfig, ViTForImageClassification
|
||||
|
||||
import colossalai
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.utils import get_current_device
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
from colossalai.cluster import DistCoordinator
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
|
||||
from args import parse_benchmark_args
|
||||
|
||||
def format_num(num: int, bytes=False):
|
||||
"""Scale bytes to its proper format, e.g. 1253656 => '1.20MB'"""
|
||||
|
@ -26,8 +25,13 @@ def format_num(num: int, bytes=False):
|
|||
|
||||
|
||||
def get_data(batch_size, num_labels, num_channels=3, height=224, width=224):
|
||||
pixel_values = torch.randn(batch_size, num_channels, height, width, device=torch.cuda.current_device(), dtype=torch.float)
|
||||
labels = torch.randint(0, num_labels, (batch_size, ), device=torch.cuda.current_device(), dtype=torch.int64)
|
||||
pixel_values = torch.randn(batch_size,
|
||||
num_channels,
|
||||
height,
|
||||
width,
|
||||
device=torch.cuda.current_device(),
|
||||
dtype=torch.float)
|
||||
labels = torch.randint(0, num_labels, (batch_size,), device=torch.cuda.current_device(), dtype=torch.int64)
|
||||
return pixel_values, labels
|
||||
|
||||
|
||||
|
@ -55,11 +59,11 @@ def main():
|
|||
transformers.utils.logging.set_verbosity_info()
|
||||
else:
|
||||
transformers.utils.logging.set_verbosity_error()
|
||||
|
||||
|
||||
# Whether to set limit on memory capacity
|
||||
if args.mem_cap > 0:
|
||||
colo_memory_cap(args.mem_cap)
|
||||
|
||||
|
||||
# Build ViT model
|
||||
config = ViTConfig.from_pretrained(args.model_name_or_path)
|
||||
model = ViTForImageClassification(config)
|
||||
|
@ -75,11 +79,7 @@ def main():
|
|||
if args.plugin.startswith('torch_ddp'):
|
||||
plugin = TorchDDPPlugin()
|
||||
elif args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(device=get_current_device(),
|
||||
placement_policy='cpu',
|
||||
pin_memory=True,
|
||||
strict_ddp_mode=True,
|
||||
initial_scale=2**5)
|
||||
plugin = GeminiPlugin(offload_optim_frac=1.0, pin_memory=True, initial_scale=2**5)
|
||||
elif args.plugin == 'low_level_zero':
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2**5)
|
||||
logger.info(f"Set plugin as {args.plugin}", ranks=[0])
|
||||
|
@ -90,16 +90,15 @@ def main():
|
|||
# Set booster
|
||||
booster = Booster(plugin=plugin, **booster_kwargs)
|
||||
model, optimizer, _, _, _ = booster.boost(model, optimizer)
|
||||
|
||||
|
||||
# Start training.
|
||||
logger.info(f"Start testing", ranks=[0])
|
||||
progress_bar = tqdm.tqdm(total=args.max_train_steps, desc="Training Step", disable=not coordinator.is_master())
|
||||
|
||||
|
||||
torch.cuda.synchronize()
|
||||
model.train()
|
||||
start_time = time.time()
|
||||
|
||||
|
||||
for _ in range(args.max_train_steps):
|
||||
|
||||
pixel_values, labels = get_data(args.batch_size, args.num_labels, 3, 224, 224)
|
||||
|
@ -111,18 +110,19 @@ def main():
|
|||
|
||||
torch.cuda.synchronize()
|
||||
progress_bar.update(1)
|
||||
|
||||
# Compute Statistics
|
||||
|
||||
# Compute Statistics
|
||||
end_time = time.time()
|
||||
throughput = "{:.4f}".format((world_size * args.max_train_steps * args.batch_size) / (end_time - start_time))
|
||||
max_mem = format_num(torch.cuda.max_memory_allocated(device=torch.cuda.current_device()), bytes=True)
|
||||
|
||||
logger.info(f"Testing finished, "
|
||||
f"batch size per gpu: {args.batch_size}, "
|
||||
f"plugin: {args.plugin}, "
|
||||
f"throughput: {throughput}, "
|
||||
f"maximum memory usage per gpu: {max_mem}.",
|
||||
ranks=[0])
|
||||
|
||||
logger.info(
|
||||
f"Testing finished, "
|
||||
f"batch size per gpu: {args.batch_size}, "
|
||||
f"plugin: {args.plugin}, "
|
||||
f"throughput: {throughput}, "
|
||||
f"maximum memory usage per gpu: {max_mem}.",
|
||||
ranks=[0])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -1,20 +1,19 @@
|
|||
import torch
|
||||
import torch.distributed as dist
|
||||
import transformers
|
||||
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor
|
||||
from args import parse_demo_args
|
||||
from data import BeansDataset, beans_collator
|
||||
from tqdm import tqdm
|
||||
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor
|
||||
|
||||
import colossalai
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.utils import get_current_device
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
from colossalai.cluster import DistCoordinator
|
||||
|
||||
from args import parse_demo_args
|
||||
from data import BeansDataset, beans_collator
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.utils import get_current_device
|
||||
|
||||
|
||||
def move_to_cuda(batch, device):
|
||||
|
@ -22,12 +21,12 @@ def move_to_cuda(batch, device):
|
|||
|
||||
|
||||
def train_epoch(epoch, model, optimizer, lr_scheduler, dataloader, booster, coordinator):
|
||||
|
||||
|
||||
torch.cuda.synchronize()
|
||||
model.train()
|
||||
|
||||
with tqdm(dataloader, desc=f'Epoch [{epoch + 1}]', disable=not coordinator.is_master()) as pbar:
|
||||
|
||||
|
||||
for batch in pbar:
|
||||
|
||||
# Forward
|
||||
|
@ -47,7 +46,7 @@ def train_epoch(epoch, model, optimizer, lr_scheduler, dataloader, booster, coor
|
|||
|
||||
@torch.no_grad()
|
||||
def evaluate_model(epoch, model, eval_dataloader, num_labels, coordinator):
|
||||
|
||||
|
||||
model.eval()
|
||||
accum_loss = torch.zeros(1, device=get_current_device())
|
||||
total_num = torch.zeros(1, device=get_current_device())
|
||||
|
@ -76,9 +75,7 @@ def evaluate_model(epoch, model, eval_dataloader, num_labels, coordinator):
|
|||
print(f"Evaluation result for epoch {epoch + 1}: \
|
||||
average_loss={avg_loss}, \
|
||||
accuracy={accuracy}.")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
|
@ -102,14 +99,13 @@ def main():
|
|||
train_dataset = BeansDataset(image_processor, split='train')
|
||||
eval_dataset = BeansDataset(image_processor, split='validation')
|
||||
|
||||
|
||||
# Load pretrained ViT model
|
||||
config = ViTConfig.from_pretrained(args.model_name_or_path)
|
||||
config.num_labels = train_dataset.num_labels
|
||||
config.id2label = {str(i): c for i, c in enumerate(train_dataset.label_names)}
|
||||
config.label2id = {c: str(i) for i, c in enumerate(train_dataset.label_names)}
|
||||
model = ViTForImageClassification.from_pretrained(args.model_name_or_path,
|
||||
config=config,
|
||||
model = ViTForImageClassification.from_pretrained(args.model_name_or_path,
|
||||
config=config,
|
||||
ignore_mismatched_sizes=True)
|
||||
logger.info(f"Finish loading model from {args.model_name_or_path}", ranks=[0])
|
||||
|
||||
|
@ -123,26 +119,22 @@ def main():
|
|||
if args.plugin.startswith('torch_ddp'):
|
||||
plugin = TorchDDPPlugin()
|
||||
elif args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(device=get_current_device(),
|
||||
placement_policy='cpu',
|
||||
pin_memory=True,
|
||||
strict_ddp_mode=True,
|
||||
initial_scale=2**5)
|
||||
plugin = GeminiPlugin(offload_optim_frac=1.0, pin_memory=True, initial_scale=2**5)
|
||||
elif args.plugin == 'low_level_zero':
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2**5)
|
||||
logger.info(f"Set plugin as {args.plugin}", ranks=[0])
|
||||
|
||||
# Prepare dataloader
|
||||
train_dataloader = plugin.prepare_dataloader(train_dataset,
|
||||
batch_size=args.batch_size,
|
||||
shuffle=True,
|
||||
drop_last=True,
|
||||
collate_fn=beans_collator)
|
||||
batch_size=args.batch_size,
|
||||
shuffle=True,
|
||||
drop_last=True,
|
||||
collate_fn=beans_collator)
|
||||
eval_dataloader = plugin.prepare_dataloader(eval_dataset,
|
||||
batch_size=args.batch_size,
|
||||
shuffle=True,
|
||||
drop_last=True,
|
||||
collate_fn=beans_collator)
|
||||
batch_size=args.batch_size,
|
||||
shuffle=True,
|
||||
drop_last=True,
|
||||
collate_fn=beans_collator)
|
||||
|
||||
# Set optimizer
|
||||
optimizer = HybridAdam(model.parameters(), lr=(args.learning_rate * world_size), weight_decay=args.weight_decay)
|
||||
|
@ -156,11 +148,11 @@ def main():
|
|||
|
||||
# Set booster
|
||||
booster = Booster(plugin=plugin, **booster_kwargs)
|
||||
model, optimizer, _, train_dataloader, lr_scheduler = booster.boost(model=model,
|
||||
optimizer=optimizer,
|
||||
dataloader=train_dataloader,
|
||||
lr_scheduler=lr_scheduler)
|
||||
|
||||
model, optimizer, _, train_dataloader, lr_scheduler = booster.boost(model=model,
|
||||
optimizer=optimizer,
|
||||
dataloader=train_dataloader,
|
||||
lr_scheduler=lr_scheduler)
|
||||
|
||||
# Finetuning
|
||||
logger.info(f"Start finetuning", ranks=[0])
|
||||
for epoch in range(args.num_epoch):
|
||||
|
@ -174,4 +166,4 @@ def main():
|
|||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
main()
|
||||
|
|
|
@ -7,6 +7,14 @@ This directory includes two parts: Using the Booster API finetune Huggingface Be
|
|||
bash test_ci.sh
|
||||
```
|
||||
|
||||
### Results on 2-GPU
|
||||
|
||||
| Plugin | Accuracy | F1-score |
|
||||
| -------------- | -------- | -------- |
|
||||
| torch_ddp | 84.4% | 88.6% |
|
||||
| torch_ddp_fp16 | 84.7% | 88.8% |
|
||||
| gemini | 84.0% | 88.4% |
|
||||
|
||||
## Benchmark
|
||||
```
|
||||
bash benchmark.sh
|
||||
|
@ -14,9 +22,9 @@ bash benchmark.sh
|
|||
|
||||
The benchmark currently reports these metrics: CUDA memory usage, throughput, and the number of model parameters. If you have custom metrics, you can add them to benchmark_util.
|
||||
|
||||
## Results
|
||||
### Results
|
||||
|
||||
### Bert
|
||||
#### Bert
|
||||
|
||||
| | max cuda mem | throughput(sample/s) | params |
|
||||
| :-----| -----------: | :--------: | :----: |
|
||||
|
@ -25,10 +33,10 @@ Now include these metrics in benchmark: CUDA mem occupy, throughput and the numb
|
|||
| gemini | 11.0 GB | 12.9 | 82M |
|
||||
| low_level_zero | 11.29 G | 14.7 | 82M |
|
||||
|
||||
### AlBert
|
||||
#### AlBert
|
||||
| | max cuda mem | throughput(sample/s) | params |
|
||||
| :-----| -----------: | :--------: | :----: |
|
||||
| ddp | OOM | | |
|
||||
| ddp_fp16 | OOM | | |
|
||||
| gemini | 69.39 G | 1.3 | 208M |
|
||||
| low_level_zero | 56.89 G | 1.4 | 208M |
|
||||
| low_level_zero | 56.89 G | 1.4 | 208M |
|
||||
|
|
|
@ -219,7 +219,7 @@ def main():
|
|||
if args.plugin.startswith('torch_ddp'):
|
||||
plugin = TorchDDPPlugin()
|
||||
elif args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, initial_scale=2**5)
|
||||
plugin = GeminiPlugin(initial_scale=2**5)
|
||||
elif args.plugin == 'low_level_zero':
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2**5)
|
||||
elif args.plugin == 'hybrid_parallel':
|
||||
|
|
|
@ -4,9 +4,6 @@ export DISTPLAN=${DISTPLAN:-"CAI_Gemini"}
|
|||
|
||||
# The following options only valid when DISTPLAN="colossalai"
|
||||
export GPUNUM=${GPUNUM:-1}
|
||||
export TPDEGREE=${TPDEGREE:-1}
|
||||
export PLACEMENT=${PLACEMENT:-"cpu"}
|
||||
export USE_SHARD_INIT=${USE_SHARD_INIT:-False}
|
||||
export BATCH_SIZE=${BATCH_SIZE:-16}
|
||||
export MODEL_TYPE=${MODEL_TYPE:-"gpt2_medium"}
|
||||
export TRAIN_STEP=${TRAIN_STEP:-10}
|
||||
|
@ -21,11 +18,8 @@ fi
|
|||
mkdir -p gemini_logs
|
||||
|
||||
torchrun --standalone --nproc_per_node=${GPUNUM} ./train_gpt_demo.py \
|
||||
--tp_degree=${TPDEGREE} \
|
||||
--model_type=${MODEL_TYPE} \
|
||||
--batch_size=${BATCH_SIZE} \
|
||||
--placement=${PLACEMENT} \
|
||||
${USE_SHARD_INIT} \
|
||||
--distplan=${DISTPLAN} \
|
||||
--train_step=${TRAIN_STEP} \
|
||||
2>&1 | tee ./gemini_logs/${MODEL_TYPE}_${DISTPLAN}_gpu_${GPUNUM}_bs_${BATCH_SIZE}_tp_${TPDEGREE}_${PLACEMENT}.log
|
||||
|
|
|
@ -6,29 +6,17 @@ for MODEL_TYPE in "gpt2_medium"; do
|
|||
for DISTPLAN in "CAI_Gemini"; do
|
||||
for BATCH_SIZE in 2; do
|
||||
for GPUNUM in 1 4; do
|
||||
for TPDEGREE in 1 2; do
|
||||
if [ ${TPDEGREE} -gt ${GPUNUM} ]; then
|
||||
continue
|
||||
fi
|
||||
for PLACEMENT in "cpu" "auto"; do
|
||||
MODEL_TYPE=${MODEL_TYPE} DISTPLAN=${DISTPLAN} BATCH_SIZE=${BATCH_SIZE} GPUNUM=${GPUNUM} TPDEGREE=${TPDEGREE} PLACEMENT=${PLACEMENT} \
|
||||
bash ./run_gemini.sh
|
||||
done
|
||||
done
|
||||
MODEL_TYPE=${MODEL_TYPE} DISTPLAN=${DISTPLAN} BATCH_SIZE=${BATCH_SIZE} GPUNUM=${GPUNUM} \
|
||||
bash ./run_gemini.sh
|
||||
done
|
||||
done
|
||||
done
|
||||
|
||||
for DISTPLAN in "zero1" "zero2"; do
|
||||
for DISTPLAN in "CAI_ZeRO2" "CAI_ZeRO1"; do
|
||||
for BATCH_SIZE in 2; do
|
||||
for GPUNUM in 1 4; do
|
||||
for TPDEGREE in 1; do
|
||||
if [ ${TPDEGREE} -gt ${GPUNUM} ]; then
|
||||
continue
|
||||
fi
|
||||
MODEL_TYPE=${MODEL_TYPE} DISTPLAN=${DISTPLAN} BATCH_SIZE=${BATCH_SIZE} GPUNUM=${GPUNUM} TPDEGREE=${TPDEGREE}\
|
||||
bash ./run_gemini.sh
|
||||
done
|
||||
MODEL_TYPE=${MODEL_TYPE} DISTPLAN=${DISTPLAN} BATCH_SIZE=${BATCH_SIZE} GPUNUM=${GPUNUM} \
|
||||
bash ./run_gemini.sh
|
||||
done
|
||||
done
|
||||
done
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import os
|
||||
from contextlib import nullcontext
|
||||
from functools import partial
|
||||
from time import time
|
||||
|
||||
|
@ -13,11 +14,10 @@ from torch.nn.parallel import DistributedDataParallel as DDP
|
|||
import colossalai
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
from colossalai.lazy import LazyInitContext
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.tensor import ColoParameter, ComputePattern, ComputeSpec, ProcessGroup, ReplicaSpec, ShardSpec
|
||||
from colossalai.utils import get_current_device
|
||||
from colossalai.zero import ColoInitContext
|
||||
|
||||
CAI_VERSION = colossalai.__version__
|
||||
|
||||
|
@ -30,24 +30,6 @@ def parse_args():
|
|||
default='CAI_Gemini',
|
||||
help="The distributed plan [colossalai, zero1, zero2, torch_ddp, torch_zero].",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--tp_degree",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Tensor Parallelism Degree. Valid when using colossalai as dist plan.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--placement",
|
||||
type=str,
|
||||
default='cpu',
|
||||
help="Placement Policy for Gemini. Valid when using colossalai as dist plan.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--shardinit",
|
||||
action='store_true',
|
||||
help=
|
||||
"Shard the tensors when init the model to shrink peak memory size on the assigned device. Valid when using colossalai as dist plan.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--batch_size",
|
||||
type=int,
|
||||
|
@ -71,20 +53,6 @@ def parse_args():
|
|||
return args
|
||||
|
||||
|
||||
# Parameter Sharding Strategies for Tensor Parallelism
|
||||
def split_param_single_dim_tp1d(dim: int, param: ColoParameter, pg: ProcessGroup):
|
||||
spec = (ShardSpec([dim], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
|
||||
param.set_tensor_spec(*spec)
|
||||
|
||||
|
||||
def split_param_row_tp1d(param: ColoParameter, pg: ProcessGroup):
|
||||
split_param_single_dim_tp1d(0, param, pg)
|
||||
|
||||
|
||||
def split_param_col_tp1d(param: ColoParameter, pg: ProcessGroup):
|
||||
split_param_single_dim_tp1d(-1, param, pg)
|
||||
|
||||
|
||||
class GPTLMLoss(nn.Module):
|
||||
|
||||
def __init__(self):
|
||||
|
@ -140,47 +108,6 @@ def set_cpu_maximum_parallelism():
|
|||
print(f"environmental variable OMP_NUM_THREADS is set to {max_concurrency}.")
|
||||
|
||||
|
||||
# Tensor Parallel
|
||||
def tensor_parallelize(model: torch.nn.Module, pg: ProcessGroup):
|
||||
"""tensor_parallelize
|
||||
Sharding the Model Parameters.
|
||||
|
||||
Args:
|
||||
model (torch.nn.Module): a torch module to be sharded
|
||||
"""
|
||||
for mn, module in model.named_modules():
|
||||
for pn, param in module.named_parameters(recurse=False):
|
||||
# NOTE() a param maybe shared by two modules
|
||||
if hasattr(param, 'visited'):
|
||||
continue
|
||||
|
||||
# if shard init, then convert param to replica and use the dp-only ProcessGroup
|
||||
param: ColoParameter = param
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
param.set_process_group(pg)
|
||||
|
||||
# shard it w.r.t tp pattern
|
||||
if 'mlp.c_fc' in mn:
|
||||
if 'weight' in pn or 'bias' in pn:
|
||||
split_param_col_tp1d(param, pg) # column slice
|
||||
# keep the shape of the output from c_fc
|
||||
param.compute_spec.set_output_replicate(False)
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
elif 'mlp.c_proj' in mn:
|
||||
if 'weight' in pn:
|
||||
split_param_row_tp1d(param, pg) # row slice
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
elif 'wte' in mn or 'wpe' in mn:
|
||||
split_param_col_tp1d(param, pg) # column slice
|
||||
elif 'c_attn' in mn or 'c_proj' in mn:
|
||||
split_param_col_tp1d(param, pg) # column slice
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
param.visited = True
|
||||
|
||||
|
||||
def main():
|
||||
# version check
|
||||
# this example is supposed to work for versions greater than 0.2.0
|
||||
|
@ -213,30 +140,13 @@ def main():
|
|||
|
||||
# build criterion
|
||||
criterion = GPTLMLoss()
|
||||
|
||||
torch.manual_seed(123)
|
||||
if args.distplan.startswith("CAI"):
|
||||
# all param must use the same process group.
|
||||
world_size = torch.distributed.get_world_size()
|
||||
shard_pg = ProcessGroup(tp_degree=world_size) if args.shardinit else None
|
||||
default_dist_spec = ShardSpec([-1], [world_size]) if args.shardinit else None
|
||||
|
||||
if args.shardinit and args.distplan != "CAI_Gemini":
|
||||
raise RuntimeError("You can only use shardinit with CAI_Gemini")
|
||||
|
||||
ctx = LazyInitContext(default_device=get_current_device()) if args.distplan == "CAI_Gemini" else nullcontext()
|
||||
# build GPT model
|
||||
with ColoInitContext(device=get_current_device(),
|
||||
dtype=torch.half,
|
||||
default_dist_spec=default_dist_spec,
|
||||
default_pg=shard_pg):
|
||||
with ctx:
|
||||
model = model_builder(args.model_type)(checkpoint=True)
|
||||
|
||||
tp_pg = ProcessGroup(tp_degree=args.tp_degree)
|
||||
# Tensor Parallelism (TP)
|
||||
# You should notice that v0.1.10 is not compatible with TP degree > 1
|
||||
if args.tp_degree > 1:
|
||||
tensor_parallelize(model, tp_pg)
|
||||
|
||||
# assign running configurations
|
||||
if args.distplan == "CAI_ZeRO1":
|
||||
zero_stage = 1
|
||||
|
@ -254,13 +164,7 @@ def main():
|
|||
overlap_communication=True,
|
||||
verbose=True)
|
||||
elif args.distplan == "CAI_Gemini":
|
||||
plugin = GeminiPlugin(device=get_current_device(),
|
||||
placement_policy=args.placement,
|
||||
pin_memory=True,
|
||||
strict_ddp_mode=args.tp_degree == 1,
|
||||
search_range_m=128,
|
||||
hidden_dim=model.config.n_embd,
|
||||
gpu_margin_mem_ratio=0.)
|
||||
plugin = GeminiPlugin(search_range_m=128, hidden_dim=model.config.n_embd)
|
||||
else:
|
||||
raise RuntimeError
|
||||
|
||||
|
|
|
@ -1,11 +0,0 @@
|
|||
# Pretraining LLaMA: best practices for building LLaMA-like base models
|
||||
|
||||
<p id="ColossalChat-Speed" align="center">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/examples/images/LLaMA_pretraining.png" width=600/>
|
||||
</p>
|
||||
|
||||
- 65-billion-parameter large model pretraining accelerated by 38%
|
||||
[[code]](https://github.com/hpcaitech/ColossalAI/tree/example/llama/examples/language/llama)
|
||||
[[blog]](https://www.hpc-ai.tech/blog/large-model-pretraining)
|
||||
|
||||
> Since the main branch is being updated, in order to maintain the stability of the code, this example is temporarily kept as an [independent branch](https://github.com/hpcaitech/ColossalAI/tree/example/llama/examples/language/llama).
|
|
@ -0,0 +1,194 @@
|
|||
# Pretraining LLaMA-1/2: best practices for building LLaMA-1/2-like base models
|
||||
|
||||
### LLaMA2
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/llama2_pretraining.png" width=600/>
|
||||
</p>
|
||||
|
||||
- 70 billion parameter LLaMA2 model training accelerated by 195%
|
||||
[[code]](https://github.com/hpcaitech/ColossalAI/tree/example/llama/examples/language/llama)
|
||||
[[blog]](https://www.hpc-ai.tech/blog/70b-llama2-training)
|
||||
|
||||
### LLaMA1
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/hpcaitech/public_assets/main/examples/images/LLaMA_pretraining.png" width=600/>
|
||||
</p>
|
||||
|
||||
- 65-billion-parameter large model pretraining accelerated by 38%
|
||||
[[code]](https://github.com/hpcaitech/ColossalAI/tree/example/llama/examples/language/llama)
|
||||
[[blog]](https://www.hpc-ai.tech/blog/large-model-pretraining)
|
||||
|
||||
## Dataset
|
||||
|
||||
Unlike the original LLaMA, we use the [RedPajama](https://www.together.xyz/blog/redpajama) dataset, which is a reproduction of the LLaMA training dataset containing over 1.2 trillion tokens. The full dataset is ~5TB unzipped on disk and ~3TB to download compressed.
|
||||
|
||||
A smaller, more manageable random sample can be downloaded through [Hugging Face](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T). If you just want to try out the pretraining script, you can use the 1B-token sample subset of RedPajama, available on [Hugging Face](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample).
|
||||
|
||||
RedPajama-Data-1T consists of seven data slices:
|
||||
|
||||
| | RedPajama | LLaMA |
|
||||
|---------------|--------------|---------------|
|
||||
| CommonCrawl | 878 billion | 852 billion |
|
||||
| C4 | 175 billion | 190 billion |
|
||||
| Github | 59 billion | 100 billion |
|
||||
| Books | 26 billion | 25 billion |
|
||||
| ArXiv | 28 billion | 33 billion |
|
||||
| Wikipedia | 24 billion | 25 billion |
|
||||
| StackExchange | 20 billion | 27 billion |
|
||||
| Total | 1.2 trillion | 1.25 trillion |
|
||||
|
||||
## Training
|
||||
|
||||
We follow the hyperparameter settings from the original LLaMA paper: AdamW with $\beta_1=0.9$ and $\beta_2=0.95$, a cosine learning rate schedule whose final learning rate equals 10% of the peak learning rate, a weight decay of 0.1, gradient clipping of 1.0, and 2,000 warmup steps (a minimal scheduler sketch is shown after the table below).
|
||||
|
||||
| params | learning rate | batch size |
|
||||
|--------|---------------|------------|
|
||||
| 6.7B | 3.0e-4 | 4M |
|
||||
| 13.0B | 3.0e-4 | 4M |
|
||||
| 32.5B | 1.5e-4 | 4M |
|
||||
| 65.2B | 1.5e-4 | 4M |
|
||||
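The snippet below is a minimal PyTorch sketch of this schedule (linear warmup to the peak learning rate, then cosine decay to 10% of it). The dummy model, the step counts, and the use of `SequentialLR` are illustrative assumptions only; the actual scheduler is built inside the training script.

```python
# Minimal sketch of the schedule above, not the exact implementation used in this example.
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR, SequentialLR

peak_lr, warmup_steps, total_steps = 3e-4, 2000, 100_000           # illustrative step counts
model = torch.nn.Linear(8, 8)                                       # stand-in for the LLaMA model
optimizer = torch.optim.AdamW(model.parameters(), lr=peak_lr, betas=(0.9, 0.95), weight_decay=0.1)

warmup = LinearLR(optimizer, start_factor=1e-3, end_factor=1.0, total_iters=warmup_steps)
decay = CosineAnnealingLR(optimizer, T_max=total_steps - warmup_steps, eta_min=0.1 * peak_lr)
lr_scheduler = SequentialLR(optimizer, schedulers=[warmup, decay], milestones=[warmup_steps])
```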
|
||||
## Usage
|
||||
|
||||
### 1. Installation
|
||||
|
||||
Please install the latest ColossalAI from source.
|
||||
|
||||
```bash
|
||||
CUDA_EXT=1 pip install -U git+https://github.com/hpcaitech/ColossalAI
|
||||
```
|
||||
|
||||
Then install other dependencies.
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
Additionally, we recommend using torch 1.13.1. We have tested our code with torch 1.13.1 and found it compatible with both our code and flash attention.
|
||||
|
||||
### 2. Download the dataset
|
||||
|
||||
The dataset can be automatically downloaded by using `huggingface/datasets`. You can specify the dataset path by `-d` or `--dataset`. The default dataset is `togethercomputer/RedPajama-Data-1T-Sample`.
|
||||
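If you want to check dataset access before launching a multi-node run, the following is a minimal sketch using the Hugging Face `datasets` API (shown only for illustration; the training script performs this download by itself):

```python
# Minimal sketch: fetch the 1B-token RedPajama sample that the script uses by default.
from datasets import load_dataset

dataset = load_dataset("togethercomputer/RedPajama-Data-1T-Sample", split="train")
print(len(dataset), list(dataset[0].keys()))
```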
|
||||
### 3. Command line arguments
|
||||
|
||||
You can use `colossalai run` to launch multi-node training:
|
||||
```bash
|
||||
colossalai run --nproc_per_node YOUR_GPU_PER_NODE --hostfile YOUR_HOST_FILE \
|
||||
pretrain.py --OTHER_CONFIGURATIONS
|
||||
```
|
||||
|
||||
Here is a sample hostfile:
|
||||
|
||||
```text
|
||||
hostname1
|
||||
hostname2
|
||||
hostname3
|
||||
hostname4
|
||||
```
|
||||
|
||||
Make sure the master node can access all nodes (including itself) via passwordless SSH.
|
||||
|
||||
Here are the details of the CLI arguments:
|
||||
|
||||
- Model configuration: `-c`, `--config`. `7b`, `13b`, `30b`, and `65b` are supported for LLaMA-1; `7b`, `13b`, and `70b` are supported for LLaMA-2.
|
||||
- Booster plugin: `-p`, `--plugin`. `gemini`, `gemini_auto`, `zero2` and `zero2_cpu` are supported. For more details, please refer to [Booster plugins](https://colossalai.org/docs/basics/booster_plugins).
|
||||
- Dataset path: `-d`, `--dataset`. The default dataset is `togethercomputer/RedPajama-Data-1T-Sample`. It supports any dataset from `datasets` with the same data format as RedPajama.
|
||||
- Number of epochs: `-e`, `--num_epochs`. The default value is 1.
|
||||
- Local batch size: `-b`, `--batch_size`. Batch size per GPU. The default value is 2.
|
||||
- Learning rate: `--lr`. The default value is 3e-4.
|
||||
- Weight decay: `-w`, `--weight_decay`. The default value is 0.1.
|
||||
- Warmup steps: `-s`, `--warmup_steps`. The default value is 2000.
|
||||
- Gradient checkpointing: `-g`, `--gradient_checkpoint`. The default value is `False`. This saves memory at the cost of speed. We recommend enabling this option when training with a large batch size.
|
||||
- Max length: `-l`, `--max_length`. The default value is 4096.
|
||||
- Mixed precision: `-x`, `--mixed_precision`. The default value is "fp16". "fp16" and "bf16" are supported.
|
||||
- Save interval: `-i`, `--save_interval`. The interval (steps) of saving checkpoints. The default value is 1000.
|
||||
- Checkpoint directory: `-o`, `--save_dir`. The directory path to save checkpoints. The default value is `checkpoint`.
|
||||
- Checkpoint to load: `-f`, `--load`. The checkpoint path to load. The default value is `None`.
|
||||
- Gradient clipping: `--gradient_clipping`. The default value is 1.0.
|
||||
- Tensorboard log directory: `-t`, `--tensorboard_dir`. The directory path to save tensorboard logs. The default value is `tb_logs`.
|
||||
- Flash attention: `-a`, `--flash_attention`. If you want to use flash attention, you must install `flash-attn`. The default value is `False`. It helps accelerate training while saving memory. We recommend always enabling flash attention.
|
||||
|
||||
|
||||
### 4. Shell Script Examples
|
||||
|
||||
For your convenience, we provide several shell scripts to run benchmarks with various configurations.
|
||||
|
||||
You can find them in the `scripts/benchmark_7B` and `scripts/benchmark_70B` directories. The main command should be in the format:
|
||||
```bash
|
||||
colossalai run --nproc_per_node YOUR_GPU_PER_NODE --hostfile YOUR_HOST_FILE \
|
||||
benchmark.py --OTHER_CONFIGURATIONS
|
||||
```
|
||||
Here we show an example of how to run LLaMA pretraining with `gemini, batch_size=16, sequence_length=4096, gradient_checkpoint=True, flash_attn=True`.
|
||||
|
||||
#### a. Running environment
|
||||
This experiment was performed on 4 computing nodes with 32 A800 GPUs in total for LLaMA-1 65B. The nodes are connected with RDMA, and the GPUs within each node are fully connected with NVLink.
|
||||
|
||||
#### b. Running command
|
||||
|
||||
```bash
|
||||
cd scripts/benchmark_7B
|
||||
```
|
||||
|
||||
First, put your host file (`hosts.txt`) in this directory with your real host IPs or host names.
|
||||
|
||||
Here is a sample `hosts.txt`:
|
||||
```text
|
||||
hostname1
|
||||
hostname2
|
||||
hostname3
|
||||
hostname4
|
||||
```
|
||||
|
||||
Then add environment variables to the script if needed.
|
||||
|
||||
Finally, run the following command to start training:
|
||||
|
||||
```bash
|
||||
bash gemini.sh
|
||||
```
|
||||
#### c. Results
|
||||
If you run the above command successfully, you will get the following results:
|
||||
`max memory usage: 55491.10 MB, throughput: 24.26 samples/s, TFLOPS/GPU: 167.43`.
|
||||
|
||||
|
||||
## Reference
|
||||
```
|
||||
@article{bian2021colossal,
|
||||
title={Colossal-AI: A Unified Deep Learning System For Large-Scale Parallel Training},
|
||||
author={Bian, Zhengda and Liu, Hongxin and Wang, Boxiang and Huang, Haichen and Li, Yongbin and Wang, Chuanrui and Cui, Fan and You, Yang},
|
||||
journal={arXiv preprint arXiv:2110.14883},
|
||||
year={2021}
|
||||
}
|
||||
```
|
||||
|
||||
```bibtex
|
||||
@software{openlm2023openllama,
|
||||
author = {Geng, Xinyang and Liu, Hao},
|
||||
title = {OpenLLaMA: An Open Reproduction of LLaMA},
|
||||
month = May,
|
||||
year = 2023,
|
||||
url = {https://github.com/openlm-research/open_llama}
|
||||
}
|
||||
```
|
||||
|
||||
```bibtex
|
||||
@software{together2023redpajama,
|
||||
author = {Together Computer},
|
||||
title = {RedPajama-Data: An Open Source Recipe to Reproduce LLaMA training dataset},
|
||||
month = April,
|
||||
year = 2023,
|
||||
url = {https://github.com/togethercomputer/RedPajama-Data}
|
||||
}
|
||||
```
|
||||
|
||||
```bibtex
|
||||
@article{touvron2023llama,
|
||||
title={Llama: Open and efficient foundation language models},
|
||||
author={Touvron, Hugo and Lavril, Thibaut and Izacard, Gautier and Martinet, Xavier and Lachaux, Marie-Anne and Lacroix, Timoth{\'e}e and Rozi{\`e}re, Baptiste and Goyal, Naman and Hambro, Eric and Azhar, Faisal and others},
|
||||
journal={arXiv preprint arXiv:2302.13971},
|
||||
year={2023}
|
||||
}
|
||||
```
|
|
@ -0,0 +1,83 @@
|
|||
from types import MethodType
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from transformers.models.llama.modeling_llama import LlamaAttention, apply_rotary_pos_emb, repeat_kv
|
||||
|
||||
SUPPORT_XFORMERS = False
|
||||
SUPPORT_FLASH2 = False
|
||||
try:
|
||||
import xformers.ops as xops
|
||||
SUPPORT_XFORMERS = True
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
try:
|
||||
from flash_attn import flash_attn_func
|
||||
SUPPORT_FLASH2 = True
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
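# Prefer flash-attn 2 when it is available; otherwise fall back to xformers' memory-efficient attention (see llama_flash_attention below).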
SUPPORT_FLASH = SUPPORT_XFORMERS or SUPPORT_FLASH2
|
||||
|
||||
|
||||
def llama_flash_attention(
|
||||
self: LlamaAttention,
|
||||
hidden_states: torch.Tensor,
|
||||
attention_mask: Optional[torch.Tensor] = None,
|
||||
position_ids: Optional[torch.LongTensor] = None,
|
||||
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
||||
output_attentions: bool = False,
|
||||
use_cache: bool = False,
|
||||
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
||||
bsz, q_len, _ = hidden_states.size()
|
||||
|
||||
query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
||||
key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
||||
value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
||||
|
||||
kv_seq_len = key_states.shape[-2]
|
||||
if past_key_value is not None:
|
||||
kv_seq_len += past_key_value[0].shape[-2]
|
||||
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
||||
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
||||
# [bsz, nh, t, hd]
|
||||
|
||||
if past_key_value is not None:
|
||||
# reuse k, v, self_attention
|
||||
key_states = torch.cat([past_key_value[0], key_states], dim=2)
|
||||
value_states = torch.cat([past_key_value[1], value_states], dim=2)
|
||||
|
||||
past_key_value = (key_states, value_states) if use_cache else None
|
||||
|
||||
# repeat k/v heads if n_kv_heads < n_heads
|
||||
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
||||
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
||||
|
||||
# q, k, v is [B, H, S, K] and xformers need [B, S, H, K]. returns [B, S, H, K]
|
||||
query_states = query_states.transpose(1, 2)
|
||||
key_states = key_states.transpose(1, 2)
|
||||
value_states = value_states.transpose(1, 2)
|
||||
if SUPPORT_FLASH2:
|
||||
attn_output = flash_attn_func(query_states, key_states, value_states, causal=True)
|
||||
else:
|
||||
attn_output = xops.memory_efficient_attention(query_states,
|
||||
key_states,
|
||||
value_states,
|
||||
attn_bias=xops.LowerTriangularMask())
|
||||
|
||||
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
||||
|
||||
attn_output = self.o_proj(attn_output)
|
||||
|
||||
if not output_attentions:
|
||||
attn_weights = None
|
||||
|
||||
return attn_output, attn_weights, past_key_value
|
||||
|
||||
|
||||
def replace_xformers(model: nn.Module):
|
||||
for module in model.modules():
|
||||
if isinstance(module, LlamaAttention):
|
||||
module.forward = MethodType(llama_flash_attention, module)
|
|
@ -0,0 +1,211 @@
|
|||
import argparse
|
||||
import resource
|
||||
from contextlib import nullcontext
|
||||
|
||||
import torch
|
||||
from attn import SUPPORT_FLASH, replace_xformers
|
||||
from data_utils import RandomDataset
|
||||
from model_utils import format_numel_str, get_model_numel
|
||||
from performance_evaluator import PerformanceEvaluator
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, MixedPrecision
|
||||
from tqdm import tqdm
|
||||
from transformers.models.llama.configuration_llama import LlamaConfig
|
||||
from transformers.models.llama.modeling_llama import LlamaForCausalLM
|
||||
|
||||
import colossalai
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, TorchFSDPPlugin
|
||||
from colossalai.cluster import DistCoordinator
|
||||
from colossalai.lazy import LazyInitContext
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.utils import get_current_device
|
||||
|
||||
# ==============================
|
||||
# Constants
|
||||
# ==============================
|
||||
|
||||
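# LLaMA-style model configurations; the '70b' config enables grouped-query attention via num_key_value_heads=8.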
MODEL_CONFIGS = {
|
||||
'7b':
|
||||
LlamaConfig(max_position_embeddings=4096),
|
||||
'13b':
|
||||
LlamaConfig(hidden_size=5120,
|
||||
intermediate_size=13824,
|
||||
num_hidden_layers=40,
|
||||
num_attention_heads=40,
|
||||
max_position_embeddings=4096),
|
||||
'70b':
|
||||
LlamaConfig(hidden_size=8192,
|
||||
intermediate_size=28672,
|
||||
num_hidden_layers=80,
|
||||
num_attention_heads=64,
|
||||
max_position_embeddings=4096,
|
||||
num_key_value_heads=8),
|
||||
}
|
||||
|
||||
|
||||
def main():
|
||||
# ==============================
|
||||
# Parse Arguments
|
||||
# ==============================
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('-c', '--config', type=str, default='7b', help='Model configuration')
|
||||
parser.add_argument('-p',
|
||||
'--plugin',
|
||||
choices=['gemini', 'gemini_auto', 'fsdp', 'fsdp_cpu', '3d', '3d_cpu'],
|
||||
default='gemini',
|
||||
help='Choose which plugin to use')
|
||||
parser.add_argument('-b', '--batch_size', type=int, default=2, help='Batch size')
|
||||
parser.add_argument('-s', '--num_steps', type=int, default=5, help='Number of steps to run')
|
||||
parser.add_argument('-i', '--ignore_steps', type=int, default=2, help='Number of steps to ignore')
|
||||
parser.add_argument('-g', '--grad_checkpoint', action='store_true', help='Use gradient checkpointing')
|
||||
parser.add_argument('-l', '--max_length', type=int, default=4096, help='Max sequence length')
|
||||
parser.add_argument('-w',
|
||||
'--warmup_ratio',
|
||||
type=float,
|
||||
default=0.8,
|
||||
help='warm up ratio of non-model data. Only for gemini-auto')
|
||||
parser.add_argument('-m', '--memory_limit', type=int, help='Gemini memory limit in mb')
|
||||
parser.add_argument('-x', '--xformers', action='store_true', help='Use xformers')
|
||||
parser.add_argument('--shard_param_frac', type=float, default=1.0, help='Shard param fraction. Only for gemini')
|
||||
parser.add_argument('--offload_optim_frac', type=float, default=0.0, help='Offload optim fraction. Only for gemini')
|
||||
parser.add_argument('--offload_param_frac', type=float, default=0.0, help='Offload param fraction. Only for gemini')
|
||||
parser.add_argument('--tp', type=int, default=1, help='Tensor parallel size')
|
||||
parser.add_argument('--pp', type=int, default=1, help='Pipeline parallel size')
|
||||
parser.add_argument('--mbs', type=int, default=1)
|
||||
parser.add_argument('--zero', type=int, default=0)
|
||||
args = parser.parse_args()
|
||||
|
||||
colossalai.launch_from_torch({})
|
||||
coordinator = DistCoordinator()
|
||||
|
||||
def empty_init():
|
||||
pass
|
||||
|
||||
# ==============================
|
||||
# Initialize Booster
|
||||
# ==============================
|
||||
use_empty_init = True
|
||||
if args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(precision='bf16',
|
||||
shard_param_frac=args.shard_param_frac,
|
||||
offload_optim_frac=args.offload_optim_frac,
|
||||
offload_param_frac=args.offload_param_frac)
|
||||
elif args.plugin == 'gemini_auto':
|
||||
plugin = GeminiPlugin(placement_policy='auto', precision='bf16', warmup_non_model_data_ratio=args.warmup_ratio)
|
||||
elif args.plugin == 'fsdp':
|
||||
if use_empty_init:
|
||||
plugin = TorchFSDPPlugin(
|
||||
mixed_precision=MixedPrecision(param_dtype=torch.float16,
|
||||
reduce_dtype=torch.float16,
|
||||
buffer_dtype=torch.float16),
|
||||
param_init_fn=empty_init(),
|
||||
)
|
||||
else:
|
||||
plugin = TorchFSDPPlugin(mixed_precision=MixedPrecision(
|
||||
param_dtype=torch.float16, reduce_dtype=torch.float16, buffer_dtype=torch.float16))
|
||||
elif args.plugin == 'fsdp_cpu':
|
||||
if use_empty_init:
|
||||
plugin = TorchFSDPPlugin(
|
||||
mixed_precision=MixedPrecision(param_dtype=torch.float16,
|
||||
reduce_dtype=torch.float16,
|
||||
buffer_dtype=torch.float16),
|
||||
cpu_offload=CPUOffload(offload_params=True),
|
||||
param_init_fn=empty_init(),
|
||||
)
|
||||
else:
|
||||
plugin = TorchFSDPPlugin(mixed_precision=MixedPrecision(param_dtype=torch.float16,
|
||||
reduce_dtype=torch.float16,
|
||||
buffer_dtype=torch.float16),
|
||||
cpu_offload=CPUOffload(offload_params=True))
|
||||
elif args.plugin == '3d':
|
||||
plugin = HybridParallelPlugin(tp_size=args.tp,
|
||||
pp_size=args.pp,
|
||||
zero_stage=args.zero,
|
||||
enable_fused_normalization=True,
|
||||
num_microbatches=args.mbs,
|
||||
precision='bf16')
|
||||
elif args.plugin == '3d_cpu':
|
||||
plugin = HybridParallelPlugin(tp_size=args.tp,
|
||||
pp_size=args.pp,
|
||||
zero_stage=args.zero,
|
||||
cpu_offload=True,
|
||||
enable_fused_normalization=True,
|
||||
num_microbatches=args.mbs,
|
||||
initial_scale=2**8,
|
||||
precision='bf16')
|
||||
else:
|
||||
raise ValueError(f'Unknown plugin {args.plugin}')
|
||||
|
||||
booster = Booster(plugin=plugin)
|
||||
|
||||
# ==============================
|
||||
# Initialize Dataset and Dataloader
|
||||
# ==============================
|
||||
dp_size = plugin.dp_size if isinstance(plugin, HybridParallelPlugin) else coordinator.world_size
|
||||
|
||||
config = MODEL_CONFIGS[args.config]
|
||||
dataset = RandomDataset(num_samples=args.batch_size * args.num_steps * dp_size,
|
||||
max_length=args.max_length,
|
||||
vocab_size=config.vocab_size)
|
||||
dataloader = plugin.prepare_dataloader(dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)
|
||||
|
||||
# ==============================
|
||||
# Initialize Model and Optimizer
|
||||
# ==============================
|
||||
init_ctx = LazyInitContext(
|
||||
default_device=get_current_device()) if isinstance(plugin,
|
||||
(GeminiPlugin, HybridParallelPlugin)) else nullcontext()
|
||||
|
||||
with init_ctx:
|
||||
model = LlamaForCausalLM(config)
|
||||
|
||||
if args.grad_checkpoint:
|
||||
model.gradient_checkpointing_enable()
|
||||
|
||||
if args.xformers:
|
||||
assert SUPPORT_FLASH, 'flash-attn or xformers must be installed to use --xformers'
|
||||
replace_xformers(model)
|
||||
|
||||
model_numel = get_model_numel(model)
|
||||
coordinator.print_on_master(f'Model params: {format_numel_str(model_numel)}')
|
||||
performance_evaluator = PerformanceEvaluator(model_numel,
|
||||
args.grad_checkpoint,
|
||||
args.ignore_steps,
|
||||
dp_world_size=dp_size)
|
||||
|
||||
optimizer = HybridAdam(model.parameters())
|
||||
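# Temporarily switch the default dtype so parameters materialized during boost are created in bf16, then restore float32 below.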
torch.set_default_dtype(torch.bfloat16)
|
||||
model, optimizer, _, dataloader, _ = booster.boost(model, optimizer, dataloader=dataloader)
|
||||
torch.set_default_dtype(torch.float)
|
||||
coordinator.print_on_master(f'Booster init max CUDA memory: {torch.cuda.max_memory_allocated()/1024**2:.2f} MB')
|
||||
coordinator.print_on_master(
|
||||
f'Booster init max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024:.2f} MB')
|
||||
|
||||
if isinstance(plugin, HybridParallelPlugin) and args.pp > 1:
|
||||
data_iter = iter(dataloader)
|
||||
for step in tqdm(range(len(dataloader)), desc='Step', disable=not coordinator.is_master()):
|
||||
performance_evaluator.on_step_start(step)
|
||||
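# With pipeline parallelism, the booster runs the whole forward/backward micro-batch schedule internally, so only the optimizer step is issued here.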
booster.execute_pipeline(data_iter,
|
||||
model,
|
||||
criterion=lambda outputs, inputs: outputs[0],
|
||||
optimizer=optimizer,
|
||||
return_loss=False)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
performance_evaluator.on_step_end(input_ids=torch.empty(args.batch_size, args.max_length))
|
||||
else:
|
||||
for step, batch in enumerate(tqdm(dataloader, desc='Step', disable=not coordinator.is_master())):
|
||||
performance_evaluator.on_step_start(step)
|
||||
outputs = model(**batch)
|
||||
loss = outputs[0]
|
||||
booster.backward(loss, optimizer)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
performance_evaluator.on_step_end(**batch)
|
||||
|
||||
performance_evaluator.on_fit_end()
|
||||
coordinator.print_on_master(f'Max CUDA memory usage: {torch.cuda.max_memory_allocated()/1024**2:.2f} MB')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -0,0 +1,119 @@
|
|||
import json
|
||||
import random
|
||||
from typing import Iterator, Optional
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from torch.distributed import ProcessGroup
|
||||
from torch.distributed.distributed_c10d import _get_default_group
|
||||
from torch.utils.data import DataLoader, Dataset, DistributedSampler
|
||||
|
||||
from colossalai.utils import get_current_device
|
||||
|
||||
|
||||
class StatefulDistributedSampler(DistributedSampler):
|
||||
|
||||
def __init__(self,
|
||||
dataset: Dataset,
|
||||
num_replicas: Optional[int] = None,
|
||||
rank: Optional[int] = None,
|
||||
shuffle: bool = True,
|
||||
seed: int = 0,
|
||||
drop_last: bool = False) -> None:
|
||||
super().__init__(dataset, num_replicas, rank, shuffle, seed, drop_last)
|
||||
self.start_index: int = 0
|
||||
|
||||
def __iter__(self) -> Iterator:
|
||||
iterator = super().__iter__()
|
||||
indices = list(iterator)
|
||||
indices = indices[self.start_index:]
|
||||
return iter(indices)
|
||||
|
||||
def __len__(self) -> int:
|
||||
return self.num_samples - self.start_index
|
||||
|
||||
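# Allows resuming mid-epoch: samples before `start_index` are skipped on the next pass over the sampler.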
def set_start_index(self, start_index: int) -> None:
|
||||
self.start_index = start_index
|
||||
|
||||
|
||||
def prepare_dataloader(dataset,
|
||||
batch_size,
|
||||
shuffle=False,
|
||||
seed=1024,
|
||||
drop_last=False,
|
||||
pin_memory=False,
|
||||
num_workers=0,
|
||||
process_group: Optional[ProcessGroup] = None,
|
||||
**kwargs):
|
||||
r"""
|
||||
Prepare a dataloader for distributed training. The dataloader will be wrapped by
|
||||
`torch.utils.data.DataLoader` and `StatefulDistributedSampler`.
|
||||
|
||||
|
||||
Args:
|
||||
dataset (`torch.utils.data.Dataset`): The dataset to be loaded.
|
||||
shuffle (bool, optional): Whether to shuffle the dataset. Defaults to False.
|
||||
seed (int, optional): Random worker seed for sampling, defaults to 1024.
|
||||
batch_size (int): The number of samples loaded per batch on each process.
|
||||
drop_last (bool, optional): Set to True to drop the last incomplete batch, if the dataset size
|
||||
is not divisible by the batch size. If False and the size of dataset is not divisible by
|
||||
the batch size, then the last batch will be smaller, defaults to False.
|
||||
pin_memory (bool, optional): Whether to pin memory address in CPU memory. Defaults to False.
|
||||
num_workers (int, optional): Number of worker threads for this dataloader. Defaults to 0.
|
||||
kwargs (dict): optional parameters for ``torch.utils.data.DataLoader``, more details could be found in
|
||||
`DataLoader <https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader>`_.
|
||||
|
||||
Returns:
|
||||
:class:`torch.utils.data.DataLoader`: A DataLoader used for training or testing.
|
||||
"""
|
||||
_kwargs = kwargs.copy()
|
||||
process_group = process_group or _get_default_group()
|
||||
sampler = StatefulDistributedSampler(dataset,
|
||||
num_replicas=process_group.size(),
|
||||
rank=process_group.rank(),
|
||||
shuffle=shuffle)
|
||||
|
||||
# Deterministic dataloader
|
||||
def seed_worker(worker_id):
|
||||
worker_seed = seed
|
||||
np.random.seed(worker_seed)
|
||||
torch.manual_seed(worker_seed)
|
||||
random.seed(worker_seed)
|
||||
|
||||
return DataLoader(dataset,
|
||||
batch_size=batch_size,
|
||||
sampler=sampler,
|
||||
worker_init_fn=seed_worker,
|
||||
drop_last=drop_last,
|
||||
pin_memory=pin_memory,
|
||||
num_workers=num_workers,
|
||||
**_kwargs)
|
||||
|
||||
|
||||
def load_json(file_path: str):
|
||||
with open(file_path, 'r') as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
def save_json(data, file_path: str):
|
||||
with open(file_path, 'w') as f:
|
||||
json.dump(data, f, indent=4)
|
||||
|
||||
|
||||
class RandomDataset(Dataset):
|
||||
|
||||
def __init__(self, num_samples: int = 1000, max_length: int = 2048, vocab_size: int = 32000):
|
||||
self.num_samples = num_samples
|
||||
self.max_length = max_length
|
||||
self.input_ids = torch.randint(0, vocab_size, (num_samples, max_length), device=get_current_device())
|
||||
self.attention_mask = torch.ones_like(self.input_ids)
|
||||
|
||||
def __len__(self):
|
||||
return self.num_samples
|
||||
|
||||
def __getitem__(self, idx):
|
||||
return {
|
||||
'input_ids': self.input_ids[idx],
|
||||
'attention_mask': self.attention_mask[idx],
|
||||
'labels': self.input_ids[idx]
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
from contextlib import contextmanager
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
|
||||
@contextmanager
|
||||
def low_precision_init(target_dtype: torch.dtype = torch.float16):
|
||||
dtype = torch.get_default_dtype()
|
||||
try:
|
||||
torch.set_default_dtype(target_dtype)
|
||||
yield
|
||||
finally:
|
||||
torch.set_default_dtype(dtype)
|
||||
|
||||
|
||||
def get_model_numel(model: nn.Module) -> int:
|
||||
return sum(p.numel() for p in model.parameters())
|
||||
|
||||
|
||||
def format_numel_str(numel: int) -> str:
|
||||
B = 1024**3
|
||||
M = 1024**2
|
||||
K = 1024
|
||||
if numel >= B:
|
||||
return f'{numel / B:.2f} B'
|
||||
elif numel >= M:
|
||||
return f'{numel / M:.2f} M'
|
||||
elif numel >= K:
|
||||
return f'{numel / K:.2f} K'
|
||||
else:
|
||||
return f'{numel}'
|
|
@ -0,0 +1,102 @@
from time import time
from typing import Optional

import torch
import torch.distributed as dist
from torch import Tensor

from colossalai.cluster import DistCoordinator


def divide(x: float, y: float) -> float:
    if y == 0:
        return float('inf')
    elif y == float('inf'):
        return float('nan')
    return x / y


@torch.no_grad()
def all_reduce_mean(x: float, world_size: int) -> float:
    if world_size == 1:
        return x
    tensor = torch.tensor([x], device=torch.cuda.current_device())
    dist.all_reduce(tensor)
    tensor = tensor / world_size
    return tensor.item()


class Timer:

    def __init__(self) -> None:
        self.start_time: Optional[float] = None
        self.duration: float = 0.

    def start(self) -> None:
        self.start_time = time()

    def end(self) -> None:
        assert self.start_time is not None
        self.duration += time() - self.start_time
        self.start_time = None

    def reset(self) -> None:
        self.duration = 0.


class PerformanceEvaluator:
    """
    Callback for evaluating the performance of the model.

    Args:
        model_numel: The number of parameters of the model.
        enable_grad_checkpoint: Whether gradient checkpointing is enabled.
        ignore_steps: The number of warm-up steps to ignore when measuring performance.
        dp_world_size: The data-parallel world size. Defaults to the global world size.
    """

    def __init__(self,
                 model_numel: int,
                 enable_grad_checkpoint: bool = False,
                 ignore_steps: int = 0,
                 dp_world_size: Optional[int] = None) -> None:
        self.model_numel = model_numel
        self.enable_grad_checkpoint = enable_grad_checkpoint
        self.ignore_steps = ignore_steps

        self.coordinator = DistCoordinator()
        self.dp_world_size = dp_world_size or self.coordinator.world_size
        self.disable: bool = False
        self.timer = Timer()
        self.num_samples: int = 0
        self.flop: int = 0

    def on_step_start(self, step: int) -> None:
        self.disable = self.ignore_steps > 0 and step < self.ignore_steps
        if self.disable:
            return
        torch.cuda.synchronize()
        self.timer.start()

    def on_step_end(self, input_ids: Tensor, **kwargs) -> None:
        if self.disable:
            return
        torch.cuda.synchronize()
        self.timer.end()

        batch_size, seq_len = input_ids.shape

        self.num_samples += batch_size
        self.flop += batch_size * seq_len * self.model_numel * 2 * (3 + int(self.enable_grad_checkpoint))

    def on_fit_end(self) -> None:
        avg_duration = all_reduce_mean(self.timer.duration, self.coordinator.world_size)
        avg_throughput = self.num_samples * self.dp_world_size / (avg_duration + 1e-12)
        mp_world_size = self.coordinator.world_size // self.dp_world_size
        avg_tflops_per_gpu = self.flop / 1e12 / (avg_duration + 1e-12) / mp_world_size
        self.coordinator.print_on_master(
            f'num_samples: {self.num_samples}, dp_world_size: {self.dp_world_size}, flop: {self.flop}, avg_duration: {avg_duration}, '
            f'avg_throughput: {avg_throughput}')
        self.coordinator.print_on_master(
            f'Throughput: {avg_throughput:.2f} samples/sec, TFLOPS per GPU: {avg_tflops_per_gpu:.2f}')
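The flop counter follows the common per-token estimate of roughly 2N FLOPs for a forward pass and twice that for backward, hence the factor 2 * 3 * model_numel per token, rising to 2 * 4 when gradient checkpointing recomputes one extra forward. A hypothetical training-loop sketch showing where the evaluator hooks in (model, optimizer and dataloader are assumed to exist, and the batch is assumed to contain labels so the model returns a loss):

evaluator = PerformanceEvaluator(model_numel=get_model_numel(model),
                                 enable_grad_checkpoint=True,
                                 ignore_steps=2)    # skip warm-up steps
for step, batch in enumerate(dataloader):
    evaluator.on_step_start(step)
    loss = model(**batch)[0]    # assumes a causal LM that returns loss first
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    evaluator.on_step_end(batch['input_ids'])
evaluator.on_fit_end()          # prints throughput and TFLOPS per GPU on rank 0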
@ -0,0 +1,275 @@
import argparse
import os
import resource
from contextlib import nullcontext
from functools import partial
from typing import Optional, Tuple

import torch
import torch.distributed as dist
import torch.nn as nn
from attn import SUPPORT_XFORMERS, replace_xformers
from data_utils import load_json, prepare_dataloader, save_json
from datasets import load_dataset
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.llama.modeling_llama import LlamaForCausalLM
from transformers.models.llama.tokenization_llama import LlamaTokenizer

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin
from colossalai.cluster import DistCoordinator
from colossalai.lazy import LazyInitContext
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.nn.optimizer import HybridAdam
from colossalai.utils import get_current_device

MODEL_CONFIGS = {
    '7b':
        LlamaConfig(max_position_embeddings=4096),
    '13b':
        LlamaConfig(hidden_size=5120,
                    intermediate_size=13824,
                    num_hidden_layers=40,
                    num_attention_heads=40,
                    max_position_embeddings=4096),
    '70b':
        LlamaConfig(hidden_size=8192,
                    intermediate_size=28672,
                    num_hidden_layers=80,
                    num_attention_heads=64,
                    max_position_embeddings=4096,
                    num_key_value_heads=8),
}


def get_model_numel(model: nn.Module) -> int:
    return sum(p.numel() for p in model.parameters())


def format_numel_str(numel: int) -> str:
    B = 1024**3
    M = 1024**2
    K = 1024
    if numel >= B:
        return f'{numel / B:.2f} B'
    elif numel >= M:
        return f'{numel / M:.2f} M'
    elif numel >= K:
        return f'{numel / K:.2f} K'
    else:
        return f'{numel}'


def tokenize_batch(batch, tokenizer: Optional[LlamaTokenizer] = None, max_length: int = 2048):
    texts = [sample['text'] for sample in batch]
    data = tokenizer(texts, return_tensors="pt", padding='max_length', truncation=True, max_length=max_length)
    data['labels'] = data['input_ids'].clone()
    return data


def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    tensor.div_(dist.get_world_size())
    return tensor


def save(booster: Booster, model: nn.Module, optimizer: Optimizer, lr_scheduler: _LRScheduler, epoch: int, step: int,
         batch_size: int, coordinator: DistCoordinator, save_dir: str):
    save_dir = os.path.join(save_dir, f'epoch{epoch}-step{step}')
    os.makedirs(os.path.join(save_dir, 'model'), exist_ok=True)

    booster.save_model(model, os.path.join(save_dir, 'model'), shard=True)
    booster.save_optimizer(optimizer, os.path.join(save_dir, 'optimizer'), shard=True)
    booster.save_lr_scheduler(lr_scheduler, os.path.join(save_dir, 'lr_scheduler'))
    running_states = {
        'epoch': epoch,
        'step': step,
        'sample_start_index': step * batch_size,
    }
    if coordinator.is_master():
        save_json(running_states, os.path.join(save_dir, 'running_states.json'))


def load(booster: Booster, model: nn.Module, optimizer: Optimizer, lr_scheduler: _LRScheduler,
         load_dir: str) -> Tuple[int, int, int]:
    booster.load_model(model, os.path.join(load_dir, 'model'))
    booster.load_optimizer(optimizer, os.path.join(load_dir, 'optimizer'))
    booster.load_lr_scheduler(lr_scheduler, os.path.join(load_dir, 'lr_scheduler'))
    running_states = load_json(os.path.join(load_dir, 'running_states.json'))
    return running_states['epoch'], running_states['step'], running_states['sample_start_index']


def main():
    # ==============================
    # Parse Arguments
    # ==============================
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, default='7b', help='Model configuration')
    parser.add_argument('-p',
                        '--plugin',
                        choices=['gemini', 'gemini_auto', 'zero2', 'zero2_cpu'],
                        default='gemini',
                        help='Choose which plugin to use')
    parser.add_argument('-d',
                        '--dataset',
                        type=str,
                        default='togethercomputer/RedPajama-Data-1T-Sample',
                        help='Data set path')
    parser.add_argument('-e', '--num_epochs', type=int, default=1, help='Number of epochs')
    parser.add_argument('-b', '--batch_size', type=int, default=2, help='Local batch size')
    parser.add_argument('--lr', type=float, default=3e-4, help='Learning rate')
    parser.add_argument('-w', '--weight_decay', type=float, default=0.1, help='Weight decay')
    parser.add_argument('-s', '--warmup_steps', type=int, default=2000, help='Warmup steps')
    parser.add_argument('-g', '--grad_checkpoint', action='store_true', help='Use gradient checkpointing')
    parser.add_argument('-l', '--max_length', type=int, default=4096, help='Max sequence length')
    parser.add_argument('-x', '--mixed_precision', default='fp16', choices=['fp16', 'bf16'], help='Mixed precision')
    parser.add_argument('-i', '--save_interval', type=int, default=1000, help='Save interval')
    parser.add_argument('-o', '--save_dir', type=str, default='checkpoint', help='Checkpoint directory')
    parser.add_argument('-f', '--load', type=str, default=None, help='Load checkpoint')
    parser.add_argument('--grad_clip', type=float, default=1.0, help='Gradient clipping')
    parser.add_argument('-t', '--tensorboard_dir', type=str, default='tb_logs', help='Tensorboard directory')
    parser.add_argument('-a', '--flash_attention', action='store_true', help='Use Flash Attention')
    args = parser.parse_args()

    # ==============================
    # Initialize Distributed Training
    # ==============================
    colossalai.launch_from_torch({})
    coordinator = DistCoordinator()

    # ==============================
    # Initialize Tensorboard
    # ==============================
    if coordinator.is_master():
        os.makedirs(args.tensorboard_dir, exist_ok=True)
        writer = SummaryWriter(args.tensorboard_dir)

    # ==============================
    # Initialize Booster
    # ==============================
    if args.plugin == 'gemini':
        plugin = GeminiPlugin(precision=args.mixed_precision, initial_scale=2**16, max_norm=args.grad_clip)
    elif args.plugin == 'gemini_auto':
        plugin = GeminiPlugin(precision=args.mixed_precision,
                              placement_policy='auto',
                              initial_scale=2**16,
                              max_norm=args.grad_clip)
    elif args.plugin == 'zero2':
        plugin = LowLevelZeroPlugin(stage=2,
                                    precision=args.mixed_precision,
                                    initial_scale=2**16,
                                    max_norm=args.grad_clip)
    elif args.plugin == 'zero2_cpu':
        plugin = LowLevelZeroPlugin(stage=2,
                                    precision=args.mixed_precision,
                                    initial_scale=2**16,
                                    cpu_offload=True,
                                    max_norm=args.grad_clip)
    else:
        raise ValueError(f'Unknown plugin {args.plugin}')

    booster = Booster(plugin=plugin)

    # ==============================
    # Initialize Tokenizer, Dataset and Dataloader
    # ==============================
    tokenizer = LlamaTokenizer.from_pretrained('hf-internal-testing/llama-tokenizer')
    # follows fast chat: https://github.com/lm-sys/FastChat/blob/main/fastchat/train/train.py#L257
    tokenizer.pad_token = tokenizer.unk_token

    dataset = load_dataset(args.dataset)
    train_ds = dataset['train']
    dataloader = prepare_dataloader(train_ds,
                                    batch_size=args.batch_size,
                                    shuffle=True,
                                    drop_last=True,
                                    collate_fn=partial(tokenize_batch, tokenizer=tokenizer, max_length=args.max_length))

    # ==============================
    # Initialize Model, Optimizer and LR Scheduler
    # ==============================
    config = MODEL_CONFIGS[args.config]
    init_ctx = LazyInitContext(
        default_device=get_current_device()) if isinstance(plugin, GeminiPlugin) else nullcontext()

    with init_ctx:
        model = LlamaForCausalLM(config)

    if args.grad_checkpoint:
        model.gradient_checkpointing_enable()
    if args.flash_attention:
        assert SUPPORT_XFORMERS, 'Flash attention requires xformers to be installed'
        replace_xformers(model)

    model_numel = get_model_numel(model)
    coordinator.print_on_master(f'Model params: {format_numel_str(model_numel)}')

    optimizer = HybridAdam(model.parameters(), lr=args.lr, betas=(0.9, 0.95), weight_decay=args.weight_decay)
    lr_scheduler = CosineAnnealingWarmupLR(optimizer,
                                           total_steps=args.num_epochs * len(dataloader),
                                           warmup_steps=args.warmup_steps,
                                           eta_min=0.1 * args.lr)
    default_dtype = torch.float16 if args.mixed_precision == 'fp16' else torch.bfloat16
    torch.set_default_dtype(default_dtype)
    model, optimizer, _, dataloader, lr_scheduler = booster.boost(model,
                                                                  optimizer,
                                                                  dataloader=dataloader,
                                                                  lr_scheduler=lr_scheduler)
    torch.set_default_dtype(torch.float)

    coordinator.print_on_master(f'Booster init max CUDA memory: {torch.cuda.max_memory_allocated()/1024**2:.2f} MB')
    coordinator.print_on_master(
        f'Booster init max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024:.2f} MB')

    # load checkpoint if specified
    start_epoch = 0
    start_step = 0
    sampler_start_idx = 0
    if args.load is not None:
        coordinator.print_on_master('Loading checkpoint')
        start_epoch, start_step, sampler_start_idx = load(booster, model, optimizer, lr_scheduler, args.load)
        coordinator.print_on_master(f'Loaded checkpoint {args.load} at epoch {start_epoch} step {start_step}')

    num_steps_per_epoch = len(dataloader)
    # if resuming training, set the sampler start index to the correct value
    dataloader.sampler.set_start_index(sampler_start_idx)
    for epoch in range(start_epoch, args.num_epochs):
        dataloader.sampler.set_epoch(epoch)
        with tqdm(enumerate(dataloader),
                  desc=f'Epoch {epoch}',
                  disable=not coordinator.is_master(),
                  total=num_steps_per_epoch,
                  initial=start_step) as pbar:
            for step, batch in pbar:
                batch = {k: v.cuda() for k, v in batch.items()}
                outputs = model(**batch)
                loss = outputs[0]
                booster.backward(loss, optimizer)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

                all_reduce_mean(loss)
                pbar.set_postfix({'loss': loss.item()})
                if coordinator.is_master():
                    writer.add_scalar('loss', loss.item(), epoch * num_steps_per_epoch + step)

                if args.save_interval > 0 and (step + 1) % args.save_interval == 0:
                    coordinator.print_on_master('Saving checkpoint')
                    save(booster, model, optimizer, lr_scheduler, epoch, step + 1, args.batch_size, coordinator,
                         args.save_dir)
                    coordinator.print_on_master(f'Saved checkpoint at epoch {epoch} step {step + 1}')
        # subsequent epochs are not resumed mid-way, so reset the sampler start index and start step
        dataloader.sampler.set_start_index(0)
        start_step = 0

    coordinator.print_on_master(f'Max CUDA memory usage: {torch.cuda.max_memory_allocated()/1024**2:.2f} MB')


if __name__ == '__main__':
    main()
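A hypothetical single-node launch for the script above; the filename `pretrain.py` and the GPU count are assumptions (this hunk does not show the file path), while the flags match the argparse definitions.

colossalai run --nproc_per_node 8 pretrain.py \
    -c 7b -p gemini -x fp16 -g \
    -b 2 -l 4096 -t tb_logs -o checkpoint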
@ -0,0 +1,9 @@
colossalai>=0.3.0
datasets
numpy
torch>=1.12.0,<=2.0.0
tqdm
transformers
flash-attn>=2.0.0,<=2.0.5
SentencePiece==0.1.99
tensorboard==2.14.0
@ -0,0 +1,17 @@
#!/bin/bash

# TODO: fix this
echo "3D parallel for LLaMA-2 is not ready yet"
exit 1

################
# Load your environments and modules here
################

HOSTFILE=$(realpath hosts.txt)

cd ../..

export OMP_NUM_THREADS=8

colossalai run --nproc_per_node 8 --hostfile $HOSTFILE benchmark.py -c 70b -p 3d -g -x -b 8 --tp 4 --pp 2 --mbs 4
@ -0,0 +1,13 @@
#!/bin/bash

################
# Load your environments and modules here
################

HOSTFILE=$(realpath hosts.txt)

cd ../..

export OMP_NUM_THREADS=8

colossalai run --nproc_per_node 8 --hostfile $HOSTFILE benchmark.py -c 70b -g -x -b 2
@ -0,0 +1,13 @@
#!/bin/bash

################
# Load your environments and modules here
################

HOSTFILE=$(realpath hosts.txt)

cd ../..

export OMP_NUM_THREADS=8

colossalai run --nproc_per_node 8 --hostfile $HOSTFILE benchmark.py -c 70b -p gemini_auto -g -x -b 2
@ -0,0 +1,13 @@
#!/bin/bash

################
# Load your environments and modules here
################

HOSTFILE=$(realpath hosts.txt)

cd ../..

export OMP_NUM_THREADS=8

colossalai run --nproc_per_node 8 --hostfile $HOSTFILE benchmark.py -g -x -b 16
@ -0,0 +1,13 @@
#!/bin/bash

################
# Load your environments and modules here
################

HOSTFILE=$(realpath hosts.txt)

cd ../..

export OMP_NUM_THREADS=8

colossalai run --nproc_per_node 8 --hostfile $HOSTFILE benchmark.py -p gemini_auto -g -x -b 16
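All of the benchmark scripts above resolve a `hosts.txt` next to them; a hypothetical two-node hostfile is sketched below, assuming the usual one-reachable-hostname-per-line format expected by `colossalai run --hostfile` (the names are placeholders).

node-001
node-002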
@ -1,22 +1,18 @@
|
|||
import time
|
||||
|
||||
import torch
|
||||
import tqdm
|
||||
import transformers
|
||||
from args import parse_benchmark_args
|
||||
from transformers import AutoConfig, OPTForCausalLM
|
||||
from transformers.utils.versions import require_version
|
||||
import tqdm
|
||||
|
||||
import colossalai
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.tensor import ProcessGroup, ShardSpec
|
||||
from colossalai.utils import get_current_device
|
||||
from colossalai.zero import ColoInitContext
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
from colossalai.cluster import DistCoordinator
|
||||
|
||||
from args import parse_benchmark_args
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
|
||||
require_version("transformers>=4.20.0", "To fix: pip install -r requirements.txt")
|
||||
|
||||
|
@ -61,11 +57,11 @@ def main():
|
|||
transformers.utils.logging.set_verbosity_info()
|
||||
else:
|
||||
transformers.utils.logging.set_verbosity_error()
|
||||
|
||||
|
||||
# Whether to set limit of memory capacity
|
||||
if args.mem_cap > 0:
|
||||
colo_memory_cap(args.mem_cap)
|
||||
|
||||
|
||||
# Build OPT model
|
||||
config = AutoConfig.from_pretrained(args.model_name_or_path)
|
||||
model = OPTForCausalLM(config=config)
|
||||
|
@ -81,11 +77,7 @@ def main():
|
|||
if args.plugin.startswith('torch_ddp'):
|
||||
plugin = TorchDDPPlugin()
|
||||
elif args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(device=get_current_device(),
|
||||
placement_policy='cpu',
|
||||
pin_memory=True,
|
||||
strict_ddp_mode=True,
|
||||
initial_scale=2**5)
|
||||
plugin = GeminiPlugin(offload_optim_frac=1.0, pin_memory=True, initial_scale=2**5)
|
||||
elif args.plugin == 'low_level_zero':
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2**5)
|
||||
logger.info(f"Set plugin as {args.plugin}", ranks=[0])
|
||||
|
@ -96,18 +88,18 @@ def main():
|
|||
# Set booster
|
||||
booster = Booster(plugin=plugin, **booster_kwargs)
|
||||
model, optimizer, _, _, _ = booster.boost(model, optimizer)
|
||||
|
||||
|
||||
SEQ_LEN = 1024
|
||||
VOCAB_SIZE = 50257
|
||||
|
||||
# Start training.
|
||||
logger.info(f"Start testing", ranks=[0])
|
||||
progress_bar = tqdm.tqdm(total=args.max_train_steps, desc="Training Step", disable=not coordinator.is_master())
|
||||
|
||||
|
||||
torch.cuda.synchronize()
|
||||
model.train()
|
||||
start_time = time.time()
|
||||
|
||||
|
||||
for _ in range(args.max_train_steps):
|
||||
|
||||
input_ids, attn_mask = get_data(args.batch_size, SEQ_LEN, VOCAB_SIZE)
|
||||
|
@ -119,18 +111,19 @@ def main():
|
|||
|
||||
torch.cuda.synchronize()
|
||||
progress_bar.update(1)
|
||||
|
||||
# Compute Statistics
|
||||
|
||||
# Compute Statistics
|
||||
end_time = time.time()
|
||||
throughput = "{:.4f}".format((world_size * args.max_train_steps * args.batch_size) / (end_time - start_time))
|
||||
max_mem = format_num(torch.cuda.max_memory_allocated(device=torch.cuda.current_device()), bytes=True)
|
||||
|
||||
logger.info(f"Testing finished, "
|
||||
f"batch size per gpu: {args.batch_size}, "
|
||||
f"plugin: {args.plugin}, "
|
||||
f"throughput: {throughput}, "
|
||||
f"maximum memory usage per gpu: {max_mem}.",
|
||||
ranks=[0])
|
||||
|
||||
logger.info(
|
||||
f"Testing finished, "
|
||||
f"batch size per gpu: {args.batch_size}, "
|
||||
f"plugin: {args.plugin}, "
|
||||
f"throughput: {throughput}, "
|
||||
f"maximum memory usage per gpu: {max_mem}.",
|
||||
ranks=[0])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -1,25 +1,20 @@
|
|||
import time
|
||||
|
||||
import torch
|
||||
import datasets
|
||||
import torch
|
||||
import transformers
|
||||
from transformers import AutoConfig, OPTForCausalLM, AutoTokenizer
|
||||
from transformers import get_linear_schedule_with_warmup
|
||||
from transformers.utils.versions import require_version
|
||||
from args import parse_demo_args
|
||||
from data import NetflixDataset, netflix_collator
|
||||
from tqdm import tqdm
|
||||
from transformers import AutoConfig, AutoTokenizer, OPTForCausalLM, get_linear_schedule_with_warmup
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
import colossalai
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.tensor import ProcessGroup, ShardSpec
|
||||
from colossalai.utils import get_current_device
|
||||
from colossalai.zero import ColoInitContext
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
from colossalai.cluster import DistCoordinator
|
||||
|
||||
from args import parse_demo_args
|
||||
from data import NetflixDataset, netflix_collator
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.nn.optimizer import HybridAdam
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r requirements.txt")
|
||||
require_version("transformers>=4.20.0", "To fix: pip install -r requirements.txt")
|
||||
|
@ -30,18 +25,18 @@ def move_to_cuda(batch, device):
|
|||
|
||||
|
||||
def train_epoch(epoch, model, optimizer, lr_scheduler, dataloader, booster, coordinator):
|
||||
|
||||
|
||||
torch.cuda.synchronize()
|
||||
model.train()
|
||||
|
||||
with tqdm(dataloader, desc=f'Epoch [{epoch + 1}]', disable=not coordinator.is_master()) as pbar:
|
||||
|
||||
|
||||
for batch in pbar:
|
||||
|
||||
# Forward
|
||||
optimizer.zero_grad()
|
||||
batch = move_to_cuda(batch, torch.cuda.current_device())
|
||||
|
||||
|
||||
outputs = model(use_cache=False, **batch)
|
||||
loss = outputs['loss']
|
||||
|
||||
|
@ -72,7 +67,7 @@ def main():
|
|||
else:
|
||||
datasets.utils.logging.set_verbosity_error()
|
||||
transformers.utils.logging.set_verbosity_error()
|
||||
|
||||
|
||||
# Build OPT model
|
||||
config = AutoConfig.from_pretrained(args.model_name_or_path)
|
||||
model = OPTForCausalLM.from_pretrained(args.model_name_or_path, config=config)
|
||||
|
@ -88,43 +83,35 @@ def main():
|
|||
if args.plugin.startswith('torch_ddp'):
|
||||
plugin = TorchDDPPlugin()
|
||||
elif args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(device=get_current_device(),
|
||||
placement_policy='cpu',
|
||||
pin_memory=True,
|
||||
strict_ddp_mode=True,
|
||||
initial_scale=2**5)
|
||||
plugin = GeminiPlugin(offload_optim_frac=1.0, pin_memory=True, initial_scale=2**5)
|
||||
elif args.plugin == 'low_level_zero':
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2**5)
|
||||
logger.info(f"Set plugin as {args.plugin}", ranks=[0])
|
||||
|
||||
# Prepare tokenizer and dataloader
|
||||
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
|
||||
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
|
||||
dataset = NetflixDataset(tokenizer)
|
||||
dataloader = plugin.prepare_dataloader(dataset,
|
||||
batch_size=args.batch_size,
|
||||
shuffle=True,
|
||||
drop_last=True,
|
||||
collate_fn=netflix_collator)
|
||||
|
||||
|
||||
# Set optimizer
|
||||
optimizer = HybridAdam(model.parameters(),
|
||||
lr=(args.learning_rate * world_size),
|
||||
weight_decay=args.weight_decay)
|
||||
optimizer = HybridAdam(model.parameters(), lr=(args.learning_rate * world_size), weight_decay=args.weight_decay)
|
||||
|
||||
# Set lr scheduler
|
||||
total_steps = len(dataloader) * args.num_epoch
|
||||
num_warmup_steps = int(args.warmup_ratio * total_steps)
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer,
|
||||
num_warmup_steps=num_warmup_steps,
|
||||
num_training_steps=len(dataloader) * args.num_epoch
|
||||
)
|
||||
lr_scheduler = get_linear_schedule_with_warmup(optimizer,
|
||||
num_warmup_steps=num_warmup_steps,
|
||||
num_training_steps=len(dataloader) * args.num_epoch)
|
||||
|
||||
# Set booster
|
||||
booster = Booster(plugin=plugin, **booster_kwargs)
|
||||
model, optimizer, _, dataloader, lr_scheduler = booster.boost(model=model,
|
||||
optimizer=optimizer,
|
||||
dataloader=dataloader,
|
||||
model, optimizer, _, dataloader, lr_scheduler = booster.boost(model=model,
|
||||
optimizer=optimizer,
|
||||
dataloader=dataloader,
|
||||
lr_scheduler=lr_scheduler)
|
||||
|
||||
# Start finetuning
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
import gzip
|
||||
import random
|
||||
from contextlib import nullcontext
|
||||
from functools import partial
|
||||
from time import time
|
||||
|
||||
|
@ -8,20 +8,17 @@ import torch
|
|||
import torch.nn as nn
|
||||
import torch.optim as optim
|
||||
import tqdm
|
||||
from packaging import version
|
||||
|
||||
from colossalai.nn import HybridAdam
|
||||
from palm_pytorch import PaLM
|
||||
from palm_pytorch.autoregressive_wrapper import AutoregressiveWrapper
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
|
||||
import colossalai
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.tensor import ColoParameter, ComputePattern, ComputeSpec, ProcessGroup, ReplicaSpec, ShardSpec
|
||||
from colossalai.utils import MultiTimer, get_current_device
|
||||
from colossalai.zero import ColoInitContext, GeminiAdamOptimizer, ZeroDDP
|
||||
from colossalai.booster import Booster
|
||||
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
|
||||
from colossalai.lazy import LazyInitContext
|
||||
from colossalai.logging import disable_existing_loggers, get_dist_logger
|
||||
from colossalai.nn import HybridAdam
|
||||
from colossalai.utils import get_current_device
|
||||
|
||||
# constants
|
||||
|
||||
|
@ -44,23 +41,10 @@ def parse_args():
|
|||
help="The distributed plan [colossalai, pytorch].",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--tp_degree",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Tensor Parallelism Degree. Valid when using colossalai as dist plan.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--placement",
|
||||
type=str,
|
||||
default='cpu',
|
||||
help="Placement Policy for Gemini. Valid when using colossalai as dist plan.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--shardinit",
|
||||
type=bool,
|
||||
default=False,
|
||||
help=
|
||||
"Shard the tensors when init the model to shrink peak memory size on the assigned device. Valid when using colossalai as dist plan.",
|
||||
"--offload_optim_frac",
|
||||
type=float,
|
||||
default=1.0,
|
||||
help="Fraction of optimizer states to be offloaded. This is only used for gemini.",
|
||||
)
|
||||
parser.add_argument('-p',
|
||||
'--plugin',
|
||||
|
@ -111,51 +95,6 @@ def get_model_size(model: nn.Module):
|
|||
return total_numel
|
||||
|
||||
|
||||
|
||||
|
||||
# Parameter Sharding Strategies for Tensor Parallelism
|
||||
def split_param_single_dim_tp1d(dim: int, param: ColoParameter, pg: ProcessGroup):
|
||||
spec = (ShardSpec([dim], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
|
||||
param.set_tensor_spec(*spec)
|
||||
|
||||
|
||||
def split_param_row_tp1d(param: ColoParameter, pg: ProcessGroup):
|
||||
split_param_single_dim_tp1d(0, param, pg)
|
||||
|
||||
|
||||
def split_param_col_tp1d(param: ColoParameter, pg: ProcessGroup):
|
||||
split_param_single_dim_tp1d(-1, param, pg)
|
||||
|
||||
|
||||
# Tensor Parallel
|
||||
def tensor_parallelize(model: torch.nn.Module, pg: ProcessGroup):
|
||||
"""tensor_parallelize
|
||||
Sharding the Model Parameters.
|
||||
Args:
|
||||
model (torch.nn.Module): a torch module to be sharded
|
||||
"""
|
||||
for mn, module in model.named_modules():
|
||||
for pn, param in module.named_parameters(recurse=False):
|
||||
if hasattr(param, 'visited'):
|
||||
continue
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
if 'net.0' in mn:
|
||||
split_param_col_tp1d(param, pg) # column slice
|
||||
elif 'to_q' in mn:
|
||||
split_param_col_tp1d(param, pg) # column slice
|
||||
elif 'to_kv' in mn:
|
||||
split_param_row_tp1d(param, pg) # row slice
|
||||
elif 'to_out' in mn:
|
||||
split_param_row_tp1d(param, pg) # row slice
|
||||
elif '1.1' in mn:
|
||||
split_param_col_tp1d(param, pg) # column slice
|
||||
elif '1.2' in mn:
|
||||
split_param_row_tp1d(param, pg) # row slice
|
||||
else:
|
||||
param.set_dist_spec(ReplicaSpec())
|
||||
param.visited = True
|
||||
|
||||
|
||||
args = parse_args()
|
||||
if args.distplan not in ["colossalai", "pytorch"]:
|
||||
raise TypeError(f"{args.distplan} is error")
|
||||
|
@ -212,23 +151,18 @@ if args.distplan == "colossalai":
|
|||
if args.plugin.startswith('torch_ddp'):
|
||||
plugin = TorchDDPPlugin()
|
||||
elif args.plugin == 'gemini':
|
||||
plugin = GeminiPlugin(placement_policy=args.placement, strict_ddp_mode=True, initial_scale=2 ** 5)
|
||||
plugin = GeminiPlugin(offload_optim_frac=args.offload_optim_frac, initial_scale=2**5)
|
||||
elif args.plugin == 'low_level_zero':
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2 ** 5)
|
||||
plugin = LowLevelZeroPlugin(initial_scale=2**5)
|
||||
logger.info(f"plugin: {plugin}")
|
||||
booster = Booster(plugin=plugin, **booster_kwargs)
|
||||
|
||||
default_pg = ProcessGroup(tp_degree=args.tp_degree)
|
||||
default_dist_spec = ShardSpec([-1], [args.tp_degree]) if args.shardinit else None
|
||||
ctx = ColoInitContext(device='cpu', default_dist_spec=default_dist_spec, default_pg=default_pg)
|
||||
ctx = LazyInitContext(default_device=get_current_device()) if args.plugin == 'gemini' else nullcontext()
|
||||
|
||||
with ctx:
|
||||
model = PaLM(num_tokens=50304, dim=4096, depth=64)
|
||||
model = AutoregressiveWrapper(model, max_seq_len=SEQ_LEN)
|
||||
|
||||
pg = default_pg
|
||||
tensor_parallelize(model, pg)
|
||||
|
||||
# optimizer
|
||||
|
||||
optimizer = HybridAdam(model.parameters(), lr=LEARNING_RATE, initial_scale=2**5)
|
||||
|
|
|
@ -3,5 +3,5 @@ torch >= 1.8.1
|
|||
datasets >= 1.8.0
|
||||
sentencepiece != 0.1.92
|
||||
protobuf
|
||||
accelerate == 0.13.2
|
||||
accelerate >= 0.20.3
|
||||
transformers
|
||||
|
|
|
@ -30,7 +30,7 @@ from itertools import chain
|
|||
import datasets
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import transformers
|
||||
import transformers.utils.logging as logging
|
||||
from accelerate.utils import set_seed
|
||||
from context import barrier_context
|
||||
from datasets import load_dataset
|
||||
|
@ -57,7 +57,7 @@ from colossalai.logging import disable_existing_loggers, get_dist_logger
|
|||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.tensor import ProcessGroup
|
||||
from colossalai.utils import get_current_device, get_dataloader
|
||||
from colossalai.zero import ColoInitContext, ZeroDDP, ZeroOptimizer
|
||||
from colossalai.zero import GeminiOptimizer
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
@ -292,10 +292,10 @@ def main():
|
|||
|
||||
if is_main_process:
|
||||
datasets.utils.logging.set_verbosity_warning()
|
||||
transformers.utils.logging.set_verbosity_info()
|
||||
logging.set_verbosity_info()
|
||||
else:
|
||||
datasets.utils.logging.set_verbosity_error()
|
||||
transformers.utils.logging.set_verbosity_error()
|
||||
logging.set_verbosity_error()
|
||||
|
||||
if args.mem_cap > 0:
|
||||
colo_memory_cap(args.mem_cap)
|
||||
|
@ -391,16 +391,28 @@ def main():
|
|||
else:
|
||||
init_dev = get_current_device()
|
||||
|
||||
cai_version = colossalai.__version__
|
||||
logger.info(f'using Colossal-AI version {cai_version}')
|
||||
# build model
|
||||
if version.parse(cai_version) >= version.parse("0.3.1"):
|
||||
from contextlib import nullcontext
|
||||
|
||||
from colossalai.lazy import LazyInitContext
|
||||
ctx = LazyInitContext(
|
||||
default_device=init_dev
|
||||
) if args.model_name_or_path is None or args.model_name_or_path == 'facebook/opt-13b' else nullcontext()
|
||||
else:
|
||||
from colossalai.zero import ColoInitContext
|
||||
ctx = ColoInitContext(device=init_dev)
|
||||
if args.model_name_or_path is None or args.model_name_or_path == 'facebook/opt-13b':
|
||||
# currently, there is a bug in the pretrained opt-13b checkpoint
# we cannot import it until Hugging Face fixes it
|
||||
logger.info("Train a new model from scratch", ranks=[0])
|
||||
with ColoInitContext(device=init_dev):
|
||||
with ctx:
|
||||
model = OPTForCausalLM(config)
|
||||
else:
|
||||
logger.info("Finetune a pre-trained model", ranks=[0])
|
||||
with ColoInitContext(device=init_dev):
|
||||
with ctx:
|
||||
model = OPTForCausalLM.from_pretrained(args.model_name_or_path,
|
||||
from_tf=bool(".ckpt" in args.model_name_or_path),
|
||||
config=config,
|
||||
|
@ -410,9 +422,10 @@ def main():
|
|||
model.gradient_checkpointing_enable()
|
||||
|
||||
PLACEMENT_POLICY = 'auto'
|
||||
cai_version = colossalai.__version__
|
||||
logger.info(f'using Colossal-AI version {cai_version}')
|
||||
if version.parse(cai_version) > version.parse("0.1.10"):
|
||||
if version.parse(cai_version) >= version.parse("0.3.1"):
|
||||
from colossalai.zero import GeminiDDP
|
||||
model = GeminiDDP(model, offload_optim_frac=1.0, pin_memory=True)
|
||||
elif version.parse(cai_version) > version.parse("0.1.10"):
|
||||
try:
|
||||
from colossalai.nn.parallel import GeminiDDP
|
||||
except ImportError:
|
||||
|
@ -536,7 +549,6 @@ def main():
|
|||
]
|
||||
|
||||
optimizer = HybridAdam(optimizer_grouped_parameters, lr=args.learning_rate)
|
||||
optimizer = ZeroOptimizer(optimizer, model, initial_scale=2**14)
|
||||
|
||||
# Scheduler and math around the number of training steps.
|
||||
overrode_max_train_steps = False
|
||||
|
@ -551,6 +563,7 @@ def main():
|
|||
num_warmup_steps=args.num_warmup_steps,
|
||||
num_training_steps=args.max_train_steps,
|
||||
)
|
||||
optimizer = GeminiOptimizer(optimizer, model, initial_scale=2**14)
|
||||
|
||||
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
||||
|
|
|
@ -4,9 +4,9 @@ set -xue
|
|||
|
||||
pip install -r requirements.txt
|
||||
|
||||
BS=8
|
||||
BS=4
|
||||
MEMCAP=0
|
||||
GPUNUM=2
|
||||
GPUNUM=4
|
||||
MODLE="facebook/opt-125m"
|
||||
|
||||
torchrun \
|
||||
|
|
|
@ -197,11 +197,12 @@ def get_cuda_cc_flag() -> List[str]:
|
|||
import torch
|
||||
|
||||
cc_flag = []
|
||||
max_arch = ''.join(str(i) for i in torch.cuda.get_device_capability())
|
||||
for arch in torch.cuda.get_arch_list():
|
||||
res = re.search(r'sm_(\d+)', arch)
|
||||
if res:
|
||||
arch_cap = res[1]
|
||||
if int(arch_cap) >= 60:
|
||||
if int(arch_cap) >= 60 and int(arch_cap) <= int(max_arch):
|
||||
cc_flag.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])
|
||||
return cc_flag
|
||||
|
||||
|
|
|
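The change above caps the -gencode targets at the compute capability of the GPU that is actually present, instead of emitting every architecture in PyTorch's arch list. An illustrative, hedged sketch of the effect (device names and arch lists vary by build):

# Illustrative only: on a GPU with compute capability 8.0, max_arch == '80',
# so sm_60..sm_80 targets are kept and newer ones such as sm_86/sm_90 are skipped:
#   ['-gencode', 'arch=compute_60,code=sm_60', ...,
#    '-gencode', 'arch=compute_80,code=sm_80']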
@ -2,4 +2,4 @@
|
|||
markers =
|
||||
dist: tests which are run in a multi-GPU or multi-machine environment (at least 4 GPUs)
|
||||
largedist: tests which are run in a multi-GPU or multi-machine environment (at least 8 GPUs)
|
||||
addopts = --ignore=tests/test_analyzer --ignore=tests/test_auto_parallel --ignore=tests/test_autochunk --ignore=tests/test_moe
|
||||
addopts = --ignore=tests/test_analyzer --ignore=tests/test_auto_parallel --ignore=tests/test_autochunk --ignore=tests/test_moe --ignore=tests/test_fx
|
|
@ -17,6 +17,13 @@ def data_gen_fn():
|
|||
return dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
|
||||
|
||||
|
||||
def data_gen_for_pretrain():
|
||||
inputs = data_gen_fn()
|
||||
inputs['labels'] = inputs['input_ids'].clone()
|
||||
inputs['sentence_order_label'] = torch.zeros(BATCH_SIZE, dtype=torch.int64)
|
||||
return inputs
|
||||
|
||||
|
||||
output_transform_fn = lambda x: x
|
||||
|
||||
config = transformers.AlbertConfig(embedding_size=128,
|
||||
|
@ -26,14 +33,14 @@ config = transformers.AlbertConfig(embedding_size=128,
|
|||
intermediate_size=256)
|
||||
|
||||
model_zoo.register(name='transformers_albert',
|
||||
model_fn=lambda: transformers.AlbertModel(config),
|
||||
model_fn=lambda: transformers.AlbertModel(config, add_pooling_layer=False),
|
||||
data_gen_fn=data_gen_fn,
|
||||
output_transform_fn=output_transform_fn,
|
||||
model_attribute=ModelAttribute(has_control_flow=True))
|
||||
model_zoo.register(name='transformers_albert_for_pretraining',
|
||||
model_fn=lambda: transformers.AlbertForPreTraining(config),
|
||||
data_gen_fn=data_gen_fn,
|
||||
output_transform_fn=output_transform_fn,
|
||||
data_gen_fn=data_gen_for_pretrain,
|
||||
output_transform_fn=lambda x: dict(loss=x.loss),
|
||||
model_attribute=ModelAttribute(has_control_flow=True))
|
||||
model_zoo.register(name='transformers_albert_for_masked_lm',
|
||||
model_fn=lambda: transformers.AlbertForMaskedLM(config),
|
||||
|
|
|
@ -113,6 +113,7 @@ def data_gen_for_qa():
|
|||
output_transform_fn = lambda x: x
|
||||
|
||||
# define loss function
|
||||
|
||||
loss_fn_for_bert_model = lambda x: torch.nn.functional.mse_loss(x.last_hidden_state, torch.ones_like(x.last_hidden_state
|
||||
))
|
||||
loss_fn = lambda x: x.loss
|
||||
|
@ -126,7 +127,7 @@ config = transformers.BertConfig(hidden_size=128,
|
|||
|
||||
# register the BERT variants
|
||||
model_zoo.register(name='transformers_bert',
|
||||
model_fn=lambda: transformers.BertModel(config),
|
||||
model_fn=lambda: transformers.BertModel(config, add_pooling_layer=False),
|
||||
data_gen_fn=data_gen,
|
||||
output_transform_fn=output_transform_fn,
|
||||
loss_fn=loss_fn_for_bert_model,
|
||||
|
|
|
@ -57,6 +57,12 @@ def data_gen_for_sequence_classification():
|
|||
return data
|
||||
|
||||
|
||||
def date_gen_for_double_heads():
|
||||
data = data_gen_for_lm()
|
||||
data['mc_labels'] = torch.zeros(data['input_ids'].shape[0], dtype=torch.int64)
|
||||
return data
|
||||
|
||||
|
||||
# define output transform function
|
||||
output_transform_fn = lambda x: x
|
||||
|
||||
|
@ -94,8 +100,8 @@ model_zoo.register(name='transformers_gpt_lm',
|
|||
model_attribute=ModelAttribute(has_control_flow=True))
|
||||
model_zoo.register(name='transformers_gpt_double_heads',
|
||||
model_fn=lambda: transformers.GPT2DoubleHeadsModel(config),
|
||||
data_gen_fn=data_gen_for_lm,
|
||||
output_transform_fn=output_transform_fn,
|
||||
data_gen_fn=date_gen_for_double_heads,
|
||||
output_transform_fn=lambda x: dict(loss=x.loss + x.mc_loss),
|
||||
loss_fn=loss_fn,
|
||||
model_attribute=ModelAttribute(has_control_flow=True))
|
||||
model_zoo.register(name='transformers_gpt_for_question_answering',
|
||||
|
|
|
@ -12,19 +12,16 @@ from colossalai.lazy.lazy_init import LazyInitContext
|
|||
from colossalai.nn.optimizer import HybridAdam
|
||||
from colossalai.tensor.colo_parameter import ColoParameter
|
||||
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
|
||||
from colossalai.zero import ColoInitContext
|
||||
from tests.kit.model_zoo import model_zoo
|
||||
|
||||
|
||||
def run_fn(init_method, model_fn, data_gen_fn, output_transform_fn) -> Optional[str]:
|
||||
try:
|
||||
if init_method == 'colo':
|
||||
ctx = ColoInitContext()
|
||||
elif init_method == 'lazy':
|
||||
if init_method == 'lazy':
|
||||
ctx = LazyInitContext()
|
||||
else:
|
||||
ctx = nullcontext()
|
||||
plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, max_norm=1.0, initial_scale=2**5)
|
||||
plugin = GeminiPlugin(max_norm=1.0, initial_scale=2**5)
|
||||
booster = Booster(plugin=plugin)
|
||||
with ctx:
|
||||
model = model_fn()
|
||||
|
@ -50,6 +47,7 @@ def run_fn(init_method, model_fn, data_gen_fn, output_transform_fn) -> Optional[
|
|||
optimizer.step()
|
||||
|
||||
except Exception as e:
|
||||
# raise e
|
||||
return repr(e)
|
||||
|
||||
|
||||
|
@ -57,8 +55,9 @@ def run_fn(init_method, model_fn, data_gen_fn, output_transform_fn) -> Optional[
|
|||
# @parameterize('init_method', ['lazy', 'none', 'colo'])
|
||||
|
||||
|
||||
@parameterize('subset', ['torchvision', 'transformers', 'diffusers'])
|
||||
@parameterize('init_method', ['none'])
|
||||
def check_gemini_plugin(init_method: str = 'none', early_stop: bool = True):
|
||||
def check_gemini_plugin(subset: str, init_method: str = 'none', early_stop: bool = True):
|
||||
"""check gemini plugin over model zoo
|
||||
|
||||
Args:
|
||||
|
@ -71,29 +70,23 @@ def check_gemini_plugin(init_method: str = 'none', early_stop: bool = True):
|
|||
passed_models = []
|
||||
failed_info = {} # (model_name, error) pair
|
||||
|
||||
for name, (model_fn, data_gen_fn, output_transform_fn, _, _) in model_zoo.items():
|
||||
for name, (model_fn, data_gen_fn, output_transform_fn, _, _) in model_zoo.get_sub_registry(subset).items():
|
||||
# These models lead to CUDA error
|
||||
if name in ('diffusers_auto_encoder_kl', 'diffusers_vq_model', 'diffusers_unet2d_model', 'timm_resmlp',
|
||||
'timm_gmixer_12_224', 'timm_gmlp_b16_224', 'timm_mixer_b16_224', 'timm_convnext'):
|
||||
'timm_gmixer_12_224', 'timm_gmlp_b16_224', 'timm_mixer_b16_224', 'timm_convnext',
|
||||
'torchvision_convnext_base'):
|
||||
continue
|
||||
# These models are not compatible with gemini
|
||||
if name in [
|
||||
'diffusers_clip_vision_model', 'timm_resnet', 'timm_beit', 'timm_beitv2', 'timm_eca_nfnet',
|
||||
'timm_efficientformer', 'timm_hrnet_w18_small', 'timm_nf_ecaresnet101', 'timm_nf_regnet_b0',
|
||||
'timm_skresnet18', 'timm_wide_resnet50_2', 'timm_convit', 'timm_dm_nfnet', 'timm_swin_transformer',
|
||||
'torchaudio_conformer', 'torchaudio_deepspeech', 'torchaudio_wavernn', 'torchaudio_tacotron',
|
||||
'deepfm_interactionarch', 'deepfm_simpledeepfmnn', 'dlrm', 'dlrm_interactionarch',
|
||||
'torchvision_googlenet', 'torchvision_inception_v3', 'torchvision_mobilenet_v3_small',
|
||||
'torchvision_resnet18', 'torchvision_resnext50_32x4d', 'torchvision_wide_resnet50_2',
|
||||
'torchvision_vit_b_16', 'torchvision_convnext_base', 'torchvision_swin_s', 'transformers_albert',
|
||||
'transformers_albert_for_pretraining', 'transformers_bert', 'transformers_bert_for_pretraining',
|
||||
'transformers_gpt_double_heads', 'torchaudio_hubert_base', 'torchaudio_wav2vec2_base',
|
||||
'transformers_t5_for_conditional_generation', 'transformers_t5', 'transformers_t5_encoder_model',
|
||||
'transformers_vit', 'transformers_vit_for_masked_image_modeling',
|
||||
'transformers_vit_for_image_classification', 'transformers_chatglm',
|
||||
'transformers_chatglm_for_conditional_generation', 'transformers_blip2',
|
||||
'transformers_blip2_conditional_gerneration', 'transformers_sam', 'transformers_whisper',
|
||||
'transformers_whisper_for_conditional_generation', 'transformers_whisper_for_audio_classification'
|
||||
'timm_convit',
|
||||
'timm_dm_nfnet',
|
||||
'torchvision_vit_b_16',
|
||||
'transformers_t5',
|
||||
'transformers_t5_for_conditional_generation',
|
||||
'transformers_t5_encoder_model', # does not support apex rmsnorm
|
||||
'transformers_chatglm',
|
||||
'transformers_sam',
|
||||
'transformers_vit'
|
||||
]:
|
||||
continue
|
||||
|
||||
|
@ -105,7 +98,6 @@ def check_gemini_plugin(init_method: str = 'none', early_stop: bool = True):
|
|||
]:
|
||||
continue
|
||||
err = run_fn(init_method, model_fn, data_gen_fn, output_transform_fn)
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
if err is None:
|
||||
passed_models.append(name)
|
||||
|
|
|
@ -18,12 +18,45 @@ from colossalai.testing import (
|
|||
)
|
||||
from tests.kit.model_zoo import model_zoo
|
||||
|
||||
MODEL_PLACEMENT_CONFIGS = [
|
||||
{
|
||||
'placement_policy': 'static',
|
||||
'shard_param_frac': 0.0
|
||||
}, # zero2
|
||||
{
|
||||
'placement_policy': 'static',
|
||||
'shard_param_frac': 1.0
|
||||
}, # zero3
|
||||
{
|
||||
'placement_policy': 'static',
|
||||
'shard_param_frac': 0.5
|
||||
}, # zero3-half
|
||||
]
|
||||
|
||||
OPTIM_PLACEMENT_CONFIGS = [
|
||||
{
|
||||
'placement_policy': 'static',
|
||||
'shard_param_frac': 0.0,
|
||||
'offload_optim_frac': 0.0
|
||||
}, # zero2
|
||||
{
|
||||
'placement_policy': 'static',
|
||||
'shard_param_frac': 0.0,
|
||||
'offload_optim_frac': 1.0
|
||||
}, # zero2-offload
|
||||
{
|
||||
'placement_policy': 'static',
|
||||
'shard_param_frac': 0.0,
|
||||
'offload_optim_frac': 0.5
|
||||
}, # zero2-offload-half
|
||||
]
|
||||
|
||||
|
||||
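The static placement configs above map onto the familiar ZeRO stages: `shard_param_frac` controls what fraction of parameters is sharded across ranks (0.0 behaves like ZeRO-2, 1.0 like ZeRO-3), while `offload_optim_frac` moves that fraction of optimizer states to CPU. A sketch of expanding one entry by hand, mirroring the `GeminiPlugin(**placement_config)` call used later in this test (the keyword values are copied verbatim from the lists above):

# 'zero3': shard 100% of parameters; pair with offload_optim_frac for CPU offload.
plugin = GeminiPlugin(placement_policy='static', shard_param_frac=1.0)
booster = Booster(plugin=plugin)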
@clear_cache_before_run()
|
||||
@parameterize('placement_policy', ['cuda', 'cpu'])
|
||||
@parameterize('placement_config', MODEL_PLACEMENT_CONFIGS)
|
||||
@parameterize('model_name', ['transformers_bert_for_sequence_classification'])
|
||||
@parameterize('use_safetensors', [False, True])
|
||||
def exam_state_dict_with_origin(placement_policy, model_name, use_safetensors: bool):
|
||||
def exam_state_dict_with_origin(placement_config, model_name, use_safetensors: bool):
|
||||
from transformers import BertForSequenceClassification
|
||||
(model_fn, data_gen_fn, output_transform_fn, _, _) = next(iter(model_zoo.get_sub_registry(model_name).values()))
|
||||
bert_model = model_fn()
|
||||
|
@ -32,7 +65,7 @@ def exam_state_dict_with_origin(placement_policy, model_name, use_safetensors: b
|
|||
pretrained_path = os.path.join(tempdir, 'pretrained')
|
||||
bert_model.config.save_pretrained(save_directory=pretrained_path)
|
||||
|
||||
plugin = GeminiPlugin(placement_policy=placement_policy)
|
||||
plugin = GeminiPlugin(**placement_config)
|
||||
booster = Booster(plugin=plugin)
|
||||
bert_model, _, _, _, _ = booster.boost(bert_model)
|
||||
model_size = sum(p.numel() * p.element_size() for p in bert_model.parameters()) / 1024**2
|
||||
|
@ -46,19 +79,19 @@ def exam_state_dict_with_origin(placement_policy, model_name, use_safetensors: b
|
|||
dist.barrier()
|
||||
|
||||
new_bert_model = BertForSequenceClassification.from_pretrained(pretrained_path)
|
||||
check_state_dict_equal(bert_model.unwrap().state_dict(only_rank_0=False, dtype=torch.float32),
|
||||
check_state_dict_equal(bert_model.state_dict(only_rank_0=False, dtype=torch.float32),
|
||||
new_bert_model.state_dict(), False)
|
||||
|
||||
|
||||
@clear_cache_before_run()
|
||||
@parameterize('placement_policy', ['cuda', 'cpu'])
|
||||
@parameterize('placement_config', OPTIM_PLACEMENT_CONFIGS)
|
||||
@parameterize('shard', [False, True])
|
||||
@parameterize('model_name', ['transformers_gpt'])
|
||||
@parameterize('size_per_shard', [32])
|
||||
def exam_state_dict(placement_policy, shard: bool, model_name: str, size_per_shard: int):
|
||||
def exam_state_dict(placement_config, shard: bool, model_name: str, size_per_shard: int):
|
||||
(model_fn, data_gen_fn, output_transform_fn, _, _) = next(iter(model_zoo.get_sub_registry(model_name).values()))
|
||||
criterion = lambda x: x.mean()
|
||||
plugin = GeminiPlugin(placement_policy=placement_policy, precision="fp16", initial_scale=(2**14))
|
||||
plugin = GeminiPlugin(**placement_config, precision="fp16", initial_scale=(2**14))
|
||||
booster = Booster(plugin=plugin)
|
||||
|
||||
model = model_fn()
|
||||
|
@ -87,12 +120,11 @@ def exam_state_dict(placement_policy, shard: bool, model_name: str, size_per_sha
|
|||
dist.barrier()
|
||||
|
||||
booster.load_model(new_model, model_ckpt_path)
|
||||
check_state_dict_equal(model.unwrap().state_dict(only_rank_0=False),
|
||||
new_model.unwrap().state_dict(only_rank_0=False), False)
|
||||
check_state_dict_equal(model.state_dict(only_rank_0=False), new_model.state_dict(only_rank_0=False), False)
|
||||
|
||||
booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
|
||||
check_state_dict_equal(optimizer.unwrap().state_dict(only_rank_0=False),
|
||||
new_optimizer.unwrap().state_dict(only_rank_0=False), False)
|
||||
check_state_dict_equal(optimizer.state_dict(only_rank_0=False), new_optimizer.state_dict(only_rank_0=False),
|
||||
False)
|
||||
|
||||
# Check the new model/optimizer can successfully run.
|
||||
data = data_gen_fn()
|
||||
|
|
|
@ -60,12 +60,11 @@ def exam_torch_load_from_gemini(shard: bool, model_name: str):
|
|||
new_booster.load_model(new_model, model_ckpt_path, strict=True)
|
||||
|
||||
# Add prefix to get aligned with pytorch parameter names.
|
||||
check_state_dict_equal(
|
||||
model.unwrap().state_dict(only_rank_0=False, prefix='module.module.', dtype=torch.float32),
|
||||
new_model.state_dict(), False)
|
||||
check_state_dict_equal(model.state_dict(only_rank_0=False, prefix='module.module.', dtype=torch.float32),
|
||||
new_model.state_dict(), False)
|
||||
|
||||
new_booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
|
||||
check_state_dict_equal(optimizer.unwrap().state_dict(only_rank_0=False), new_optimizer.state_dict(), False)
|
||||
check_state_dict_equal(optimizer.state_dict(only_rank_0=False), new_optimizer.state_dict(), False)
|
||||
|
||||
# Check the new model/optimizer can successfully run.
|
||||
data = data_gen_fn()
|
||||
|
@ -124,13 +123,12 @@ def exam_gemini_load_from_torch(shard: bool, model_name: str):
|
|||
new_booster.load_model(new_model, model_ckpt_path, strict=True)
|
||||
|
||||
# Add prefix to get aligned with pytorch parameter names.
|
||||
check_state_dict_equal(
|
||||
new_model.unwrap().state_dict(only_rank_0=False, prefix='module.module.', dtype=torch.float32),
|
||||
model.state_dict(), False)
|
||||
check_state_dict_equal(new_model.state_dict(only_rank_0=False, prefix='module.module.', dtype=torch.float32),
|
||||
model.state_dict(), False)
|
||||
|
||||
new_booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
|
||||
old_state_dict = optimizer.state_dict()
|
||||
new_state_dict = new_optimizer.unwrap().state_dict(only_rank_0=False)
|
||||
new_state_dict = new_optimizer.state_dict(only_rank_0=False)
|
||||
|
||||
# Comparison of param_groups needs special care here,
|
||||
# since not all hyperparameters in Adam are used by HybridAdam
|
||||
|
@ -138,7 +136,7 @@ def exam_gemini_load_from_torch(shard: bool, model_name: str):
|
|||
for old_group, new_group in zip(old_state_dict['param_groups'], new_state_dict['param_groups']):
|
||||
for k in hyperparameters_to_examine:
|
||||
assert k in old_group and k in new_group, \
|
||||
f"Old group's keys: {list(old_group.keys())}, New group's keys: {list(new_group.keys())}"
|
||||
f"Old group's keys: {list(old_group.keys())}, New group's keys: {list(new_group.keys())}"
|
||||
assert old_group[k] == new_group[k]
|
||||
check_state_dict_equal(old_state_dict['state'], new_state_dict['state'], False)
|
||||
|
||||
|
|
|
@ -16,19 +16,21 @@ from colossalai.testing import (
|
|||
)
|
||||
|
||||
|
||||
# stage 1 and stage 2 process the optimizer/model in the same way,
# so testing stage 2 alone is sufficient
|
||||
@clear_cache_before_run()
|
||||
@parameterize('stage', [2])
|
||||
@parameterize('shard', [True, False])
|
||||
def check_low_level_zero_checkpointIO(stage: int, shard: bool):
|
||||
plugin = LowLevelZeroPlugin(stage=stage, max_norm=1.0, initial_scale=32)
|
||||
@parameterize('offload', [False, True])
|
||||
def check_low_level_zero_checkpointIO(stage: int, shard: bool, offload: bool):
|
||||
plugin = LowLevelZeroPlugin(stage=stage, max_norm=1.0, initial_scale=32, cpu_offload=offload)
|
||||
booster = Booster(plugin=plugin)
|
||||
model = resnet18()
|
||||
criterion = lambda x: x.mean()
|
||||
optimizer = HybridAdam((model.parameters()), lr=0.001)
|
||||
model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
|
||||
|
||||
x = torch.randn(4, 3, 224, 224)
|
||||
x = x.to('cuda')
|
||||
x = torch.randn(1, 3, 224, 224, device='cuda')
|
||||
output = model(x)
|
||||
loss = criterion(output)
|
||||
booster.backward(loss, optimizer)
|
||||
|
@ -50,15 +52,17 @@ def check_low_level_zero_checkpointIO(stage: int, shard: bool):
|
|||
check_state_dict_equal(model.state_dict(), new_model.state_dict(), False)
|
||||
|
||||
booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
|
||||
check_state_dict_equal(optimizer.state_dict(), new_optimizer.state_dict(), False)
|
||||
check_state_dict_equal(optimizer.optim.state_dict(), new_optimizer.optim.state_dict(), False)
|
||||
|
||||
|
||||
def run_dist(rank, world_size, port):
|
||||
colossalai.launch(config=(dict()), rank=rank, world_size=world_size, port=port, host='localhost')
|
||||
check_low_level_zero_checkpointIO()
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
|
||||
@rerun_if_address_is_in_use()
|
||||
@clear_cache_before_run()
|
||||
def test_low_level_zero_checkpointIO():
|
||||
spawn(run_dist, 2)
|
||||
|
||||
|
|
|
@ -1,104 +0,0 @@
import os
from pathlib import Path

import pytest
import torch
from torchvision import transforms
from torchvision.datasets import CIFAR10

import colossalai
from colossalai.amp import AMP_TYPE
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.engine.schedule._pipeline_schedule_v2 import PipelineScheduleV2
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.nn import CrossEntropyLoss
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.pipeline.pipelinable import PipelinableContext
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.trainer import Trainer, hooks
from colossalai.utils import get_dataloader

disable_existing_loggers()
BATCH_SIZE = 4
NUM_EPOCHS = 10
WARMUP_EPOCHS = 5
CONFIG = dict(NUM_MICRO_BATCHES=2,
              parallel=dict(pipeline=2, tensor=dict(size=1, mode='1d')),
              fp16=dict(mode=AMP_TYPE.NAIVE),
              gradient_accumulation=2)


def run_trainer(rank, world_size, port):
    disable_existing_loggers()
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    disable_existing_loggers()
    # get logger
    logger = get_dist_logger()

    pipelinable = PipelinableContext()
    try:
        from titans.model.vit import vit_tiny_patch4_32
    except ImportError:
        logger.warning('skip the test_cifar_with_data_pipeline_tensor test because titans is not installed')
        logger.warning('please install titans from https://github.com/hpcaitech/Titans')
        return
    with pipelinable:
        model = vit_tiny_patch4_32()
    pipelinable.to_layer_list()
    pipelinable.policy = "uniform"
    model = pipelinable.partition(1, gpc.pipeline_parallel_size, gpc.get_local_rank(ParallelMode.PIPELINE))

    # create dataloaders
    root = Path(os.environ['DATA'])
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4, pad_if_needed=True),
        transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    train_dataset = CIFAR10(root=root, train=True, download=True, transform=transform_train)
    train_dataloader = get_dataloader(dataset=train_dataset, shuffle=True, batch_size=BATCH_SIZE, pin_memory=True)

    # create loss function
    criterion = CrossEntropyLoss(label_smoothing=0.1)

    # create optimizer
    optimizer = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=0)

    # create lr scheduler
    lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer, total_steps=NUM_EPOCHS, warmup_steps=WARMUP_EPOCHS)

    # initialize
    engine, train_dataloader, *_ = colossalai.initialize(model=model,
                                                         optimizer=optimizer,
                                                         criterion=criterion,
                                                         train_dataloader=train_dataloader)

    engine._schedule = PipelineScheduleV2(num_microbatches=gpc.config.NUM_MICRO_BATCHES)

    logger = get_dist_logger()

    trainer = Trainer(engine=engine, logger=logger)

    hook_list = [
        hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=False),
    ]

    trainer.fit(train_dataloader=train_dataloader,
                max_steps=2,
                epochs=NUM_EPOCHS,
                hooks=hook_list,
                display_progress=True)


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_hybrid_parallel():
    spawn(run_trainer, 2)
    disable_existing_loggers()


if __name__ == '__main__':
    test_hybrid_parallel()
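Read at face value, the deleted test's CONFIG splits each dataloader batch of BATCH_SIZE=4 into NUM_MICRO_BATCHES=2 micro-batches that flow through the two pipeline stages, and gradient_accumulation=2 means two engine steps contribute gradients before each optimizer update. The snippet below is only a back-of-envelope check of those numbers, not part of the diff.

# Illustrative arithmetic for the CONFIG above (assumes data-parallel size 1, as in the test).
BATCH_SIZE = 4
NUM_MICRO_BATCHES = 2
GRADIENT_ACCUMULATION = 2

micro_batch_size = BATCH_SIZE // NUM_MICRO_BATCHES       # 2 samples per micro-batch in the pipeline
effective_batch = BATCH_SIZE * GRADIENT_ACCUMULATION     # 8 samples behind each optimizer update
print(micro_batch_size, effective_batch)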
@ -1,92 +0,0 @@
import os
import random
from typing import Callable, Type

import numpy as np
import pytest
import torch
import torch.distributed as dist

import colossalai
from colossalai.nn.parallel import ColoDDP
from colossalai.tensor import ProcessGroup
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.utils.cuda import get_current_device
from colossalai.zero import ColoInitContext, ZeroDDP
from colossalai.zero.gemini.chunk import ChunkManager, search_chunk_configuration
from colossalai.zero.gemini.gemini_mgr import GeminiManager


def set_seed(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def init_ddp(module: torch.nn.Module) -> ColoDDP:
    pg = ProcessGroup()
    return ColoDDP(module, process_group=pg)


def init_ddpv2(module: torch.nn.Module) -> ZeroDDP:
    chunk_config, *_ = search_chunk_configuration(module, 4, 1024)
    chunk_manager = ChunkManager(chunk_config)
    gemini_manager = GeminiManager('cuda', chunk_manager)
    return ZeroDDP(module, gemini_manager)


class Net(torch.nn.Module):

    def __init__(self) -> None:
        super().__init__()
        self.fc1 = torch.nn.Linear(3, 3, bias=False)
        self.fc2 = torch.nn.Linear(3, 1, bias=False)

    def forward(self, x):
        return self.fc2(self.fc1(x))


def run_fwd_bwd(ddp_cls: Type[ColoDDP], init_ddp_func: Callable[[torch.nn.Module], ColoDDP]):
    with ColoInitContext(device=get_current_device()):
        model = Net().cuda()
    w1 = model.fc1.weight
    w2 = model.fc2.weight
    ddp_cls.set_params_to_ignore([w2])
    model = init_ddp_func(model)
    x = torch.rand(2, 3, device=get_current_device())
    logits = model(x)
    loss = torch.sum(logits)
    model.backward(loss)

    if ddp_cls is ZeroDDP:
        w1s_grad = w1
    else:
        w1s_grad = w1.grad

    w1_grads = [torch.empty_like(w1) for _ in range(dist.get_world_size())]
    dist.all_gather(w1_grads, w1s_grad)
    assert torch.equal(w1_grads[0], w1_grads[1])
    w2_grads = [torch.empty_like(w2) for _ in range(dist.get_world_size())]
    dist.all_gather(w2_grads, w2.grad)
    assert not torch.equal(w2_grads[0], w2_grads[1])


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    set_seed(dist.get_rank())
    run_fwd_bwd(ColoDDP, init_ddp)
    run_fwd_bwd(ZeroDDP, init_ddpv2)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [2])
@rerun_if_address_is_in_use()
def test_ddp_ignore_params(world_size):
    spawn(run_dist, world_size)


if __name__ == '__main__':
    test_ddp_ignore_params(2)
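The core of this deleted test is the pair of assertions at the end of run_fwd_bwd: a parameter registered via set_params_to_ignore keeps rank-local gradients (they differ across ranks because each rank seeds differently), while an ordinary parameter's gradient is reduced and therefore identical everywhere. Below is a minimal sketch of that cross-rank check, assuming torch.distributed is already initialized inside a spawned worker; grads_match_across_ranks is a hypothetical helper for illustration, not a ColossalAI API.

import torch
import torch.distributed as dist


def grads_match_across_ranks(grad: torch.Tensor) -> bool:
    """Gather `grad` from every rank and report whether rank 0 and rank 1 hold the same values."""
    gathered = [torch.empty_like(grad) for _ in range(dist.get_world_size())]
    dist.all_gather(gathered, grad)
    return torch.equal(gathered[0], gathered[1])


# Inside run_fwd_bwd above, the assertions would then read roughly:
#   assert grads_match_across_ranks(w1s_grad)       # synchronized parameter
#   assert not grads_match_across_ranks(w2.grad)    # parameter excluded via set_params_to_ignore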