import copy
import heapq

from colossalai.builder import build_model, build_layer
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
import torch.nn as nn
def _binary_partition(weights, st, ed):
    """Returns the binary partition position of `weights`, given the start
    position `st` and the end position `ed`.

    Args:
        weights (list): a prefix-sum list of layer weights to be binary partitioned
        st (int): the start position of the binary partition
        ed (int): the end position of the binary partition

    Returns:
        tuple: `(st, pos, ed)`, where `pos` is the split position that makes the
        weights of the two halves as close as possible
    """
    w_sum = weights[ed - 1]
    prefix = 0
    if st > 0:
        w_sum -= weights[st - 1]
        prefix = weights[st - 1]
    minimum = float("inf")
    for idx in range(st + 1, ed):
        front = weights[idx - 1] - prefix
        diff = abs(w_sum - 2 * front)
        if diff < minimum:
            pos = idx
            minimum = diff

    return st, pos, ed
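
# For illustration: with the prefix sums [1, 3, 6, 10] of layer weights
# [1, 2, 3, 4], the best split of the full range puts the first three layers
# (weight 6) against the last one (weight 4):
#
#     >>> _binary_partition([1, 3, 6, 10], 0, 4)
#     (0, 3, 4)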
def _heap_addition(weights, intervals, add_cnt):
    """Increase the number of intervals by `add_cnt` by repeatedly bisecting the
    currently heaviest interval with `_binary_partition`.

    Args:
        weights (list): a prefix-sum list of layer weights
        intervals (list): a list of `(st, ed)` intervals covering `weights`
        add_cnt (int): how many additional intervals are needed

    Returns:
        list: the refined intervals, sorted by start position
    """

    def _heap_push(heap, st, ed):
        value = weights[ed - 1]
        if st > 0:
            value -= weights[st - 1]
        heapq.heappush(heap, (-value, st, ed))

    ret_intervals = []
    heap = []

    for st, ed in intervals:
        _heap_push(heap, st, ed)

    while add_cnt > 0:
        _, st, ed = heapq.heappop(heap)
        if ed - st == 1:
            # a single-layer interval cannot be split any further
            ret_intervals.append((st, ed))
        else:
            l, m, r = _binary_partition(weights, st, ed)
            _heap_push(heap, l, m)
            _heap_push(heap, m, r)
            add_cnt -= 1

    while heap:
        _, st, ed = heapq.heappop(heap)
        ret_intervals.append((st, ed))

    ret_intervals.sort()
    return ret_intervals
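
# For illustration: asking for one extra interval on top of [(0, 4)] splits the
# heaviest (here the only) interval at the position found by _binary_partition:
#
#     >>> _heap_addition([1, 3, 6, 10], [(0, 4)], add_cnt=1)
#     [(0, 3), (3, 4)]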
def _calc_partitions(weights, value):
    """Greedily cut the prefix-sum list `weights` into blocks, starting a new
    block whenever the running block weight exceeds `value`; returns the number
    of blocks and their `(start, end)` intervals."""
    prev = 0
    prefix = 0
    num_block = 0
    intervals = []

    for idx, w in enumerate(weights):
        # close the current block once it would exceed the budget `value`
        if w - prefix > value:
            intervals.append((prev, idx))
            prev = idx
            prefix = weights[idx - 1]
            num_block += 1

    intervals.append((prev, len(weights)))
    return num_block + 1, intervals
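
# For illustration: with a per-block budget of 6, the prefix sums [1, 3, 6, 10]
# are cut into two blocks of weight 6 and 4:
#
#     >>> _calc_partitions([1, 3, 6, 10], 6)
#     (2, [(0, 3), (3, 4)])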
def _binary_search(weights, num):
    """Binary-search the per-interval weight budget so that `weights` can be cut
    into at most `num` intervals, then split the heaviest intervals further until
    `num` intervals are produced."""
    length = len(weights)
    # prefix sums; zero-weight layers are counted as 1 so that every layer still
    # occupies a slot
    prefix = [1 if w == 0 else w for w in weights]
    for i in range(1, length):
        prefix[i] += prefix[i - 1]

    lower_bound = max(weights)
    upper_bound = prefix[length - 1]

    while upper_bound > lower_bound:
        mid = (upper_bound + lower_bound) // 2
        number, _ = _calc_partitions(prefix, mid)
        if number <= num:
            upper_bound = mid
        else:
            lower_bound = mid + 1

    num_block, intervals = _calc_partitions(prefix, upper_bound)
    if num_block < num:
        intervals = _heap_addition(prefix, intervals, num - num_block)

    return intervals
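
# For illustration: splitting the raw weights [1, 2, 3, 4] into two intervals
# minimizes the heaviest interval (6 vs. 4):
#
#     >>> _binary_search([1, 2, 3, 4], 2)
#     [(0, 3), (3, 4)]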
def partition_uniform(num_items, pipeline_parallel_size, num_chunks):
    """Evenly split `num_items` layers into `pipeline_parallel_size` stages with
    `num_chunks` chunks per stage; returns a list of `(start, end)` intervals for
    every stage."""
    assert num_items % num_chunks == 0, \
        "Layer length should be divisible by the number of chunks; otherwise the 'parameter' partition method is recommended"

    logger = get_dist_logger()
    parts = [[] for _ in range(pipeline_parallel_size)]
    partition_items = num_items // num_chunks
    for idx in range(num_chunks):
        base_idx = idx * partition_items
        chunk_size = partition_items // pipeline_parallel_size
        left = pipeline_parallel_size - partition_items % pipeline_parallel_size
        if chunk_size == 0:
            logger.warning("Some nodes in Pipeline have no requests")

        for p in range(pipeline_parallel_size):
            st = base_idx
            # stages with rank >= left take one extra layer to absorb the remainder
            base_idx += chunk_size + (p >= left)
            parts[p].append((st, base_idx))

    return parts
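
# For illustration: 8 layers split uniformly over 2 stages with 2 chunks per
# stage (interleaved / virtual pipeline style) gives
#
#     partition_uniform(8, pipeline_parallel_size=2, num_chunks=2)
#     # -> [[(0, 2), (4, 6)], [(2, 4), (6, 8)]]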
def partition_balanced(weights, pipeline_parallel_size, num_chunks):
    """Split layers into `pipeline_parallel_size * num_chunks` intervals so that
    the per-interval weight (e.g. parameter count) is as balanced as possible;
    intervals are assigned to stages in a round-robin fashion."""
    num_total = pipeline_parallel_size * num_chunks
    num_items = len(weights)
    if num_items <= num_total:
        return partition_uniform(num_items, pipeline_parallel_size, num_chunks)

    intervals = _binary_search(weights, num_total)

    current = 0
    parts = [[] for _ in range(pipeline_parallel_size)]
    for inter in intervals:
        parts[current].append(inter)
        current = (current + 1) % pipeline_parallel_size

    return parts
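
# For illustration: balancing layer weights [1, 2, 3, 4] over 2 stages groups
# the first three layers against the last one:
#
#     >>> partition_balanced([1, 2, 3, 4], pipeline_parallel_size=2, num_chunks=1)
#     [[(0, 3)], [(3, 4)]]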
def count_layer_params(layers):
    """Count the number of trainable parameters of each layer described in `layers`."""
    param_counts = [0] * len(layers)
    for idx, cfg in enumerate(layers):
        layer = build_layer(cfg)
        params = filter(lambda p: p.requires_grad, layer.parameters())
        param_counts[idx] = sum(p.numel() for p in params)

    return param_counts
def build_pipeline_model_from_cfg(config,
                                  num_chunks: int = 1,
                                  partition_method: str = 'parameter',
                                  verbose: bool = False):
    """An initializer to split the model into different stages for pipeline parallelism.

    An example of the model config is shown below. The class VisionTransformerFromConfig should
    inherit colossalai.nn.model.ModelFromConfig to allow this initializer to build the model from
    a sequence of layer configurations.

    ::

        model_config = dict(
            type='VisionTransformerFromConfig',
            embedding_cfg=dict(...),
            ...
        )

    Args:
        config (dict): Configuration of the model.
        num_chunks (int, optional): The number of chunks you want to have on the current stage.
            This value should be 1 in most cases unless you are using virtual pipeline parallelism.
        partition_method (str, optional): This parameter determines how you want to split your model
            layers into stages; you can set it to 'layer' or 'parameter'.
        verbose (bool, optional): Whether to print the logs.
    """
    ori_model = build_model(config)
    layers = ori_model.layers_cfg
    layer_length = len(layers)
    logger = get_dist_logger()
    if verbose:
        logger.info(f"The total length of layers is {layer_length}", ranks=[0])

    pipeline_parallel_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)

    method = partition_method.lower()
    # Make a partition
    if method == 'layer':
        num_layers = len(layers)
        parts = partition_uniform(num_layers, pipeline_parallel_size, num_chunks)
    elif method == 'parameter':
        param_counts = count_layer_params(layers)
        # print_rank_0(param_counts)
        parts = partition_balanced(param_counts, pipeline_parallel_size, num_chunks)
    else:
        raise ValueError("Method should be a pre-set string in [layer, parameter]")

    # Display the partition
    if verbose:
        log_str = 'Layer allocation after partitioning: \n'
        for stage in range(pipeline_parallel_size):

            num_layers = 0
            for st, ed in parts[stage]:
                num_layers += ed - st

            log_str += f'\n===== stage={stage}, layers={num_layers} =====\n'
            for st, ed in parts[stage]:
                for idx, layer in enumerate(layers[st:ed]):
                    log_str += f'\t{idx + st:2d}: {layer}\n'
        logger.info(log_str, ranks=[0])

    # Save the partition
    interval = parts[pipeline_rank]

    models = []
    for st, ed in interval:
        model = copy.deepcopy(ori_model)
        model.build_from_cfg(st, ed)
        models.append(model)

    return nn.ModuleList(models) if len(models) > 1 else models[0]
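
# Typical usage (sketch): assuming `model_config` follows the pattern in the
# docstring above and the pipeline parallel context has been initialized with
# colossalai.launch, each rank builds only its own stage:
#
#     pipeline_stage = build_pipeline_model_from_cfg(model_config,
#                                                    num_chunks=1,
#                                                    partition_method='parameter',
#                                                    verbose=True)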
def build_pipeline_model(layers: nn.Sequential, num_chunks: int = 1, verbose: bool = False):
    """An initializer to split the model into different stages for pipeline parallelism.
    Note that `layers` must be a `torch.nn.Sequential`.

    Args:
        layers (`torch.nn.Sequential`): Layers of the model.
        num_chunks (int, optional): The number of chunks you want to have on the current stage.
            This value should be 1 in most cases unless you are using virtual pipeline parallelism.
        verbose (bool, optional): Whether to print the logs.
    """
    pipeline_parallel_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    partitions = partition_uniform(len(layers), pipeline_parallel_size, num_chunks)
    module_list = []
    for start, end in partitions[pipeline_rank]:
        # pad with nn.Identity so that every stage module keeps the same layer indexing
        module_list.append(
            nn.Sequential(*[nn.Identity() for _ in range(start)], *layers[start:end],
                          *[nn.Identity() for _ in range(len(layers) - end)]))
    if verbose:
        logger = get_dist_logger()
        logger.info(f'Total {len(layers)} layers', ranks=[0])
        for rank, part in enumerate(partitions):
            log_str = f'===== stage={rank} =====\n'
            for chunk, (start, end) in enumerate(part):
                log_str += f'===== chunk={chunk}, layer=[{start}-{end}] =====\n'
                log_str += '\n'.join([str(layer) for layer in layers[start:end]]) + '\n'
            logger.info(log_str, ranks=[0])
    return nn.ModuleList(module_list) if len(module_list) > 1 else module_list[0]
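
# Typical usage (sketch): assuming the pipeline parallel context has been
# initialized, an `nn.Sequential` model can be split directly; each rank keeps
# only the layers of its own stage (padded with nn.Identity):
#
#     model = nn.Sequential(nn.Linear(256, 256), nn.ReLU(), nn.Linear(256, 10))
#     stage = build_pipeline_model(model, num_chunks=1, verbose=True)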