From 7aacfad8aff2fe3654aa3f0204e5dc6fad813ed3 Mon Sep 17 00:00:00 2001
From: "CH.Li" <32587096+lich99@users.noreply.github.com>
Date: Wed, 15 Feb 2023 14:54:53 +0800
Subject: [PATCH] fix typo (#2721)

---
 applications/ChatGPT/benchmarks/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/applications/ChatGPT/benchmarks/README.md b/applications/ChatGPT/benchmarks/README.md
index f7212fc89..b4e28ba1d 100644
--- a/applications/ChatGPT/benchmarks/README.md
+++ b/applications/ChatGPT/benchmarks/README.md
@@ -37,7 +37,7 @@ We only support `torchrun` to launch now. E.g.
 
 ```shell
 # run GPT2-S on single-node single-GPU with min batch size
-torchrun --standalone --nproc_pero_node 1 benchmark_gpt_dummy.py --model s --strategy ddp --experience_batch_size 1 --train_batch_size 1
+torchrun --standalone --nproc_per_node 1 benchmark_gpt_dummy.py --model s --strategy ddp --experience_batch_size 1 --train_batch_size 1
 # run GPT2-XL on single-node 4-GPU
 torchrun --standalone --nproc_per_node 4 benchmark_gpt_dummy.py --model xl --strategy colossalai_zero2
 # run GPT3 on 8-node 8-GPU
@@ -84,7 +84,7 @@ We only support `torchrun` to launch now. E.g.
 
 ```shell
 # run OPT-125M with no lora (lora_rank=0) on single-node single-GPU with min batch size
-torchrun --standalone --nproc_pero_node 1 benchmark_opt_lora_dummy.py --model 125m --strategy ddp --experience_batch_size 1 --train_batch_size 1 --lora_rank 0
+torchrun --standalone --nproc_per_node 1 benchmark_opt_lora_dummy.py --model 125m --strategy ddp --experience_batch_size 1 --train_batch_size 1 --lora_rank 0
 # run OPT-350M with lora_rank=4 on single-node 4-GPU
 torchrun --standalone --nproc_per_node 4 benchmark_opt_lora_dummy.py --model 350m --strategy colossalai_zero2 --lora_rank 4
 ```