ColossalAI/colossalai/utils/__init__.py

from .activation_checkpoint import checkpoint
from .common import (print_rank_0, sync_model_param_in_dp, is_dp_rank_0,
                     is_tp_rank_0, is_no_pp_or_last_stage, is_using_ddp,
                     is_using_pp, conditional_context, is_model_parallel_parameter,
                     clip_grad_norm_fp32, count_zeros_fp32, copy_tensor_parallel_attributes,
                     param_is_not_tensor_parallel_duplicate, switch_virtual_pipeline_parallel_rank)
from .cuda import get_current_device, synchronize, empty_cache, set_to_cuda
from .memory import report_memory_usage
from .timer import MultiTimer, Timer
from .multi_tensor_apply import multi_tensor_applier
from .gradient_accumulation import accumulate_gradient
from .data_sampler import DataParallelSampler, get_dataloader

__all__ = ['checkpoint',
           'print_rank_0', 'sync_model_param_in_dp', 'is_dp_rank_0',
           'is_tp_rank_0', 'is_no_pp_or_last_stage', 'is_using_ddp',
           'is_using_pp', 'conditional_context', 'is_model_parallel_parameter',
           'clip_grad_norm_fp32', 'count_zeros_fp32', 'copy_tensor_parallel_attributes',
           'param_is_not_tensor_parallel_duplicate',
           'get_current_device', 'synchronize', 'empty_cache', 'set_to_cuda',
           'report_memory_usage',
           'Timer', 'MultiTimer',
           'multi_tensor_applier',
           'accumulate_gradient',
           'DataParallelSampler', 'get_dataloader',
           'switch_virtual_pipeline_parallel_rank'
           ]