[bug] fix get_default_parser in examples (#4764)

pull/4809/head
Baizhou Zhang 2023-09-21 10:42:25 +08:00 committed by GitHub
parent c0a033700c
commit df66741f77
14 changed files with 36 additions and 26 deletions


@@ -1,4 +1,11 @@
-from .initialize import initialize, launch, launch_from_openmpi, launch_from_slurm, launch_from_torch
+from .initialize import (
+    get_default_parser,
+    initialize,
+    launch,
+    launch_from_openmpi,
+    launch_from_slurm,
+    launch_from_torch,
+)
 
 __all__ = [
     "launch",
@@ -6,4 +13,5 @@ __all__ = [
     "launch_from_slurm",
     "launch_from_torch",
     "initialize",
+    "get_default_parser",
 ]
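
With the re-export in place, user code can pull the pre-populated parser straight from the package root. A minimal sketch of that usage (the extra --tag flag is purely illustrative and not part of this commit):

    import colossalai

    # get_default_parser() returns an argparse.ArgumentParser that already
    # carries colossalai's standard distributed-launch arguments (host, port,
    # rank, and so on), so a script only adds its own flags on top.
    parser = colossalai.get_default_parser()
    parser.add_argument("--tag", type=str, default="demo", help="illustrative extra flag")
    args = parser.parse_args()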


@@ -1,10 +1,10 @@
-import colossalai
+import argparse
 
 
 __all__ = ["parse_args"]
 
 
 def parse_args():
-    parser = colossalai.get_default_parser()
+    parser = argparse.ArgumentParser()
     parser.add_argument(
         "--distplan",


@@ -1,8 +1,8 @@
-from colossalai import get_default_parser
+import argparse
 
 
 def parse_demo_args():
-    parser = get_default_parser()
+    parser = argparse.ArgumentParser()
     parser.add_argument(
         "--model_name_or_path",
         type=str,
@@ -52,7 +52,7 @@ def parse_demo_args():
 
 
 def parse_benchmark_args():
-    parser = get_default_parser()
+    parser = argparse.ArgumentParser()
     parser.add_argument(
         "--model_name_or_path",


@@ -11,9 +11,9 @@ for PLUGIN in "torch_ddp" "torch_ddp_fp16" "low_level_zero" "gemini" "hybrid_par
 do
 MODEL_PATH="google/vit-base-patch16-224"
 
-torchrun \
-  --standalone \
-  --nproc_per_node 4 \
+colossalai run \
+  --nproc_per_node ${GPUNUM} \
+  --master_port 29505 \
   vit_benchmark.py \
   --model_name_or_path ${MODEL_PATH} \
   --mem_cap ${MEMCAP} \
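
The launch scripts switch from torchrun to the colossalai run CLI. The Python entry points can keep initializing through colossalai.launch_from_torch, which reads the same rendezvous environment variables that both launchers export. A rough sketch of the script-side initialization, assuming the 2023-era API in which launch_from_torch takes a config dict:

    import colossalai
    import torch.distributed as dist

    # Started via: colossalai run --nproc_per_node ${GPUNUM} --master_port 29505 vit_benchmark.py ...
    # Each worker finds RANK / WORLD_SIZE / MASTER_ADDR / MASTER_PORT in its
    # environment, and launch_from_torch uses them to set up the process group.
    colossalai.launch_from_torch(config={})
    print(f"initialized rank {dist.get_rank()} of {dist.get_world_size()}")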


@@ -35,9 +35,9 @@ WEIGHT_DECAY=0.05
 WARMUP_RATIO=0.3
 
 # run the script for demo
-torchrun \
-  --standalone \
+colossalai run \
   --nproc_per_node ${GPUNUM} \
+  --master_port 29505 \
   vit_train_demo.py \
   --model_name_or_path ${MODEL} \
   --output_path ${OUTPUT_PATH} \


@@ -5,9 +5,9 @@ BS=8
 for PLUGIN in "torch_ddp" "torch_ddp_fp16" "low_level_zero" "gemini" "hybrid_parallel"
 do
 
-torchrun \
-  --standalone \
+colossalai run \
   --nproc_per_node 4 \
+  --master_port 29505 \
   vit_benchmark.py \
   --model_name_or_path "google/vit-base-patch16-224" \
   --plugin ${PLUGIN} \


@@ -1,3 +1,4 @@
+import argparse
 import contextlib
 import os
@@ -29,7 +30,7 @@ VOCAB_SIZE = 50257
 
 
 def main():
-    parser = colossalai.get_default_parser()
+    parser = argparse.ArgumentParser()
     parser.add_argument("--from_torch", default=False, action="store_true")
     parser.add_argument("--use_dummy_dataset", default=False, action="store_true")
     args = parser.parse_args()


@@ -1,8 +1,8 @@
-from colossalai import get_default_parser
+import argparse
 
 
 def parse_demo_args():
-    parser = get_default_parser()
+    parser = argparse.ArgumentParser()
     parser.add_argument(
         "--model_name_or_path",
         type=str,
@@ -39,7 +39,7 @@ def parse_demo_args():
 
 
 def parse_benchmark_args():
-    parser = get_default_parser()
+    parser = argparse.ArgumentParser()
     parser.add_argument(
         "--model_name_or_path",
         type=str,


@@ -16,9 +16,9 @@ for GPUNUM in 1 4
 do
 MODLE_PATH="facebook/opt-${MODEL}"
 
-torchrun \
-  --standalone \
+colossalai run \
   --nproc_per_node ${GPUNUM} \
+  --master_port 29505 \
   opt_benchmark.py \
   --model_name_or_path ${MODLE_PATH} \
   --mem_cap ${MEMCAP} \


@@ -30,9 +30,9 @@ WEIGHT_DECAY=0.01
 WARMUP_RATIO=0.1
 
 # run the script for demo
-torchrun \
-  --standalone \
+colossalai run \
   --nproc_per_node ${GPUNUM} \
+  --master_port 29505 \
   opt_train_demo.py \
   --model_name_or_path ${MODEL} \
   --output_path ${OUTPUT_PATH} \


@@ -7,9 +7,9 @@ do
 for GPUNUM in 1 4
 do
 
-torchrun \
-  --standalone \
+colossalai run \
   --nproc_per_node ${GPUNUM} \
+  --master_port 29505 \
   opt_benchmark.py \
   --model_name_or_path "facebook/opt-125m" \
   --plugin ${PLUGIN} \


@@ -8,6 +8,6 @@ export PLACEMENT='cpu'
 export USE_SHARD_INIT=False
 export BATCH_SIZE=1
 
-env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} --master_port 29501 train.py \
+env OMP_NUM_THREADS=12 colossalai run --nproc_per_node ${GPUNUM} --master_port 29505 train.py \
   --dummy_data=True --tp_degree=${TPDEGREE} --batch_size=${BATCH_SIZE} --plugin='gemini' \
   --placement ${PLACEMENT} --shardinit ${USE_SHARD_INIT} --distplan ${DISTPAN} 2>&1 | tee run.log


@@ -4,6 +4,6 @@ for BATCH_SIZE in 2
 do
 for GPUNUM in 1 4
 do
-env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} --standalone train.py --dummy_data=True --batch_size=${BATCH_SIZE} --plugin='gemini' 2>&1 | tee run.log
+env OMP_NUM_THREADS=12 colossalai run --nproc_per_node ${GPUNUM} --master_port 29505 train.py --dummy_data=True --batch_size=${BATCH_SIZE} --plugin='gemini' 2>&1 | tee run.log
 done
 done


@@ -1,3 +1,4 @@
+import argparse
 import gzip
 from contextlib import nullcontext
 from functools import partial
@@ -33,7 +34,7 @@ SEQ_LEN = 1024
 
 
 def parse_args():
-    parser = colossalai.get_default_parser()
+    parser = argparse.ArgumentParser()
     parser.add_argument(
         "--distplan",
         type=str,