From 65bdc3159f2971c71780bc1b9ca5ff9d2e8efc83 Mon Sep 17 00:00:00 2001
From: digger-yu
Date: Sat, 6 May 2023 11:27:23 +0800
Subject: [PATCH] fix some spelling error with applications/Chat/examples/
 (#3692)

* fix spelling error with examples/comminity/

* fix spelling error with example/
---
 applications/Chat/examples/README.md                    |  3 +--
 .../Chat/examples/community/peft/easy_dataset.py        | 10 +++++-----
 .../Chat/examples/community/peft/train_peft_prompts.py  |  2 +-
 .../Chat/examples/community/peft/train_peft_sft.py      |  2 +-
 applications/Chat/examples/train_sft.py                 |  2 +-
 5 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/applications/Chat/examples/README.md b/applications/Chat/examples/README.md
index 561ace220..2a2128e25 100644
--- a/applications/Chat/examples/README.md
+++ b/applications/Chat/examples/README.md
@@ -24,7 +24,6 @@
   - [LLaMA](#llama)
 - [Add your own models](#add-your-own-models)
   - [Actor model](#actor-model)
-  - [LM model](#lm-model)
   - [Reward model](#reward-model)
   - [Critic model](#critic-model)
 
@@ -150,7 +149,7 @@
 torchrun --standalone --nproc_per_node=4 train_prompts.py \
          --strategy colossalai_zero2 \
          --prompt_dataset /path/to/your/prompt_dataset \
          --pretrain_dataset /path/to/your/pretrain_dataset \
-         --rm_pretrain /your/pretrain/rm/defination \
+         --rm_pretrain /your/pretrain/rm/definition \
          --rm_path /your/rm/model/path
 ```
diff --git a/applications/Chat/examples/community/peft/easy_dataset.py b/applications/Chat/examples/community/peft/easy_dataset.py
index 24ea4f0a8..2fe293957 100644
--- a/applications/Chat/examples/community/peft/easy_dataset.py
+++ b/applications/Chat/examples/community/peft/easy_dataset.py
@@ -188,7 +188,7 @@ class EasySFTDataset(Dataset):
             else:
                 raw_input_ids.append(encoded_ids)
 
-        grouped_inpup_ids = []
+        grouped_input_ids = []
         current_input_ids = []
         attention_mask = []
         if tokenizer.pad_token_id is None:
@@ -199,7 +199,7 @@
                     #pad the current_input_ids to max_length with tokenizer.pad_token_id
                     padded_length = max_length - len(current_input_ids)
                     current_input_ids.extend([tokenizer.pad_token_id] * padded_length)
-                    grouped_inpup_ids.append(torch.tensor(current_input_ids, dtype=torch.long))
+                    grouped_input_ids.append(torch.tensor(current_input_ids, dtype=torch.long))
                     attention_mask.append(
                         torch.tensor([1] * (max_length - padded_length) + [0] * padded_length, dtype=torch.long))
                     current_input_ids = []
@@ -208,7 +208,7 @@
             if len(current_input_ids) > 0:
                 padded_length = max_length - len(current_input_ids)
                 current_input_ids.extend([tokenizer.pad_token_id] * padded_length)
-                grouped_inpup_ids.append(torch.tensor(current_input_ids, dtype=torch.long))
+                grouped_input_ids.append(torch.tensor(current_input_ids, dtype=torch.long))
                 attention_mask.append(
                     torch.tensor([1] * (max_length - padded_length) + [0] * padded_length, dtype=torch.long))
         else:
@@ -218,8 +218,8 @@
                 input_ids.extend([tokenizer.pad_token_id] * padded_length)
                 attention_mask.append(
                     torch.tensor([1] * (max_length - padded_length) + [0] * padded_length, dtype=torch.long))
-                grouped_inpup_ids.append(torch.tensor(input_ids, dtype=torch.long))
-        self.input_ids = grouped_inpup_ids
+                grouped_input_ids.append(torch.tensor(input_ids, dtype=torch.long))
+        self.input_ids = grouped_input_ids
         self.labels = copy.deepcopy(self.input_ids)
         self.file_name = data_file
         self.attention_mask = attention_mask
diff --git a/applications/Chat/examples/community/peft/train_peft_prompts.py b/applications/Chat/examples/community/peft/train_peft_prompts.py
index 0e277021e..ba8470f38 100644
--- a/applications/Chat/examples/community/peft/train_peft_prompts.py
+++ b/applications/Chat/examples/community/peft/train_peft_prompts.py
@@ -41,7 +41,7 @@ def main(args):
     # configure model
     if args.model == 'bloom':
         # initial_model = BLOOMActor(pretrained=args.pretrain)
-        print('Using peft lora to load Bloom model as inital_model')
+        print('Using peft lora to load Bloom model as initial_model')
         initial_model = BLOOMActor(pretrained=args.pretrain, lora_path=args.sft_lora_path)
         print('Using peft lora to load Bloom model as initial_model (Done)')
     else:
diff --git a/applications/Chat/examples/community/peft/train_peft_sft.py b/applications/Chat/examples/community/peft/train_peft_sft.py
index 9bd0ebc12..d2b08b72c 100644
--- a/applications/Chat/examples/community/peft/train_peft_sft.py
+++ b/applications/Chat/examples/community/peft/train_peft_sft.py
@@ -86,7 +86,7 @@ def train(args):
 
     if args.strategy == 'colossalai_gemini':
         # this is a hack to deal with the resized embedding
-        # to make sure all parameters are ColoParameter for Colossal-AI Gemini Compatiblity
+        # to make sure all parameters are ColoParameter for Colossal-AI Gemini Compatibility
         for name, param in model.named_parameters():
             if not isinstance(param, ColoParameter):
                 sub_module_name = '.'.join(name.split('.')[:-1])
diff --git a/applications/Chat/examples/train_sft.py b/applications/Chat/examples/train_sft.py
index da499f068..7fcd026fb 100644
--- a/applications/Chat/examples/train_sft.py
+++ b/applications/Chat/examples/train_sft.py
@@ -84,7 +84,7 @@ def train(args):
 
     if args.strategy == 'colossalai_gemini':
         # this is a hack to deal with the resized embedding
-        # to make sure all parameters are ColoParameter for Colossal-AI Gemini Compatiblity
+        # to make sure all parameters are ColoParameter for Colossal-AI Gemini Compatibility
         for name, param in model.named_parameters():
             if not isinstance(param, ColoParameter):
                 sub_module_name = '.'.join(name.split('.')[:-1])
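
Note (added for context, not part of the patch): the `easy_dataset.py` hunks above touch the loop that pads each grouped sequence to `max_length` with `tokenizer.pad_token_id` and records a matching 1/0 attention mask alongside `grouped_input_ids`. The sketch below is a minimal, self-contained illustration of that padding scheme, assuming PyTorch is installed; `pad_and_mask` and its arguments are illustrative names, not identifiers from the repository.

```python
import torch

def pad_and_mask(sequences, max_length, pad_token_id):
    # Right-pad each sequence of token ids to max_length and build a 1/0 attention mask,
    # mirroring the grouped_input_ids / attention_mask construction in easy_dataset.py.
    grouped_input_ids, attention_mask = [], []
    for ids in sequences:
        ids = list(ids)[:max_length]                   # truncate overly long sequences
        padded_length = max_length - len(ids)
        ids.extend([pad_token_id] * padded_length)     # pad with the pad token id
        grouped_input_ids.append(torch.tensor(ids, dtype=torch.long))
        attention_mask.append(
            torch.tensor([1] * (max_length - padded_length) + [0] * padded_length, dtype=torch.long))
    return grouped_input_ids, attention_mask

# Example: two short sequences padded to length 6 with pad id 0.
ids, mask = pad_and_mask([[5, 8, 13], [2, 3]], max_length=6, pad_token_id=0)
```

In this example, `[5, 8, 13]` and `[2, 3]` are padded to length 6 and yield masks `[1, 1, 1, 0, 0, 0]` and `[1, 1, 0, 0, 0, 0]`, so downstream attention ignores the padding positions.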