import argparse
import json
import os
from typing import Dict

import torch
from chatio import dummy_io, rich_io, simple_io
from coati.dataset.conversation import setup_conversation_template
from coati.models import generate_streaming
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedModel

from colossalai.logging import get_dist_logger

logger = get_dist_logger()


def get_gpu_memory(max_gpus=None):
    """
    Get the available memory for each GPU.

    Args:
        max_gpus (int, optional): The maximum number of GPUs to consider. Defaults to None.

    Returns:
        list: A list of available memory for each GPU.
    """
    gpu_memory = []
    num_gpus = torch.cuda.device_count() if max_gpus is None else min(max_gpus, torch.cuda.device_count())

    for gpu_id in range(num_gpus):
        # Report free memory as total device memory minus currently allocated memory, in GiB.
        with torch.cuda.device(gpu_id):
            device = torch.cuda.current_device()
            gpu_properties = torch.cuda.get_device_properties(device)
            total_memory = gpu_properties.total_memory / (1024**3)
            allocated_memory = torch.cuda.memory_allocated() / (1024**3)
            available_memory = total_memory - allocated_memory
            gpu_memory.append(available_memory)
    return gpu_memory


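# A hypothetical usage sketch for `get_gpu_memory` (not part of the original chat flow): log the
# free memory of each visible GPU before loading the model, e.g. to catch an already-busy card early.
#
#     for gpu_id, free_gib in enumerate(get_gpu_memory()):
#         logger.info(f"GPU {gpu_id}: ~{free_gib:.1f} GiB available")

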
def load_model_and_tokenizer(model_path, tokenizer_path, device="cuda", **kwargs):
    """
    Load the model and tokenizer from the specified paths and move the model to the specified device.

    Args:
        model_path (str): The path to the pre-trained model.
        tokenizer_path (str): The path to the pre-trained tokenizer.
        device (str, optional): The device to move the model to. Defaults to "cuda".
        **kwargs: Additional keyword arguments to be passed to the `AutoModelForCausalLM.from_pretrained` function.

    Returns:
        tuple: A tuple containing the loaded model and tokenizer.
    """
    model = AutoModelForCausalLM.from_pretrained(model_path, **kwargs, trust_remote_code=True).to(torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_path, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    model.to(device)

    return model, tokenizer


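# A hypothetical usage sketch for `load_model_and_tokenizer`: the **kwargs are forwarded to
# `AutoModelForCausalLM.from_pretrained`, so standard Hugging Face loading options can be passed
# (the paths below are placeholders):
#
#     model, tokenizer = load_model_and_tokenizer(
#         "/path/to/model", "/path/to/tokenizer", device="cuda:0", local_files_only=True
#     )

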
def _set_default_generate_kwargs(model: PreTrainedModel) -> Dict:
    """
    Set default keyword arguments for generation based on the given model.

    Args:
        model (PreTrainedModel): The model used for generation.

    Returns:
        Dict: A dictionary containing the default keyword arguments for generation.
    """
    unwrapped_model = model
    new_kwargs = {}
    # Reuse the Hugging Face model's own generation helpers directly when they are available.
    if hasattr(unwrapped_model, "prepare_inputs_for_generation"):
        new_kwargs["prepare_inputs_fn"] = unwrapped_model.prepare_inputs_for_generation

    if hasattr(unwrapped_model, "_update_model_kwargs_for_generation"):
        new_kwargs["update_model_kwargs_fn"] = unwrapped_model._update_model_kwargs_for_generation
    return new_kwargs


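# A hypothetical usage sketch for `_set_default_generate_kwargs`: the returned dict is merged into
# the generation kwargs before streaming, exactly as `main` does below:
#
#     default_generate_kwargs = _set_default_generate_kwargs(model)
#     model_kwargs.update(default_generate_kwargs)

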
def generation_wrapper(*args, **kwargs):
    # Positional arguments are forwarded to `generate_streaming` as (model, input_ids, tokenizer, ...).
    input_ids = args[1]
    tokenizer = args[2]
    for output in generate_streaming(*args, **kwargs):
        # Decode and yield only the newly generated tokens, skipping the prompt portion of the sequence.
        yield tokenizer.batch_decode(output[:, input_ids.size(1) :], skip_special_tokens=True)[0]


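# A hypothetical usage sketch for `generation_wrapper` (argument values are illustrative only):
#
#     for chunk in generation_wrapper(model, input_ids, tokenizer, max_new_tokens=64):
#         print(chunk, end="", flush=True)

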
def main(args):
    conversation_template_config = json.load(open(args.conversation_template_config, "r", encoding="utf8"))

    max_new_tokens = args.max_new_tokens
    model_max_length = args.model_max_length
    model, tokenizer = load_model_and_tokenizer(
        args.model_path, args.tokenizer_path or args.model_path, local_files_only=True
    )

    assert max_new_tokens <= model_max_length
    if hasattr(tokenizer, "pad_token") and hasattr(tokenizer, "eos_token") and tokenizer.eos_token is not None:
        try:
            # Some tokenizers don't allow pad_token to be set manually, e.g., Qwen.
            tokenizer.pad_token = tokenizer.eos_token
        except AttributeError as e:
            logger.warning(f"Unable to set pad token to eos token, {str(e)}")
    tokenizer.padding_side = "left"

    model_kwargs = {
        "max_new_tokens": max_new_tokens,
        # 'early_stopping': True,
        # 'top_k': -1,
        # 'top_p': 1.0,
        # 'temperature': 1.0,
        # 'temperature': 0.1,
    }
    round = 1

    conv = setup_conversation_template(tokenizer, conversation_template_config, args.conversation_template_config)

    while True:
        if args.io == "simple":
            chat_io = simple_io
        elif args.io == "rich":
            chat_io = rich_io
        elif args.io == "dummy":
            chat_io = dummy_io
        else:
            raise ValueError(f"Unknown io type: {args.io}")

        # raw_text = print(">>> Human:", end=" ")
        inp = chat_io.prompt_for_input("user")

        if not inp:
            print("prompt should not be empty!")
            continue

        if inp.strip() == "clear":
            conv.clear()
            os.system("clear")
            continue

        if inp.strip() == "exit":
            print("End of chat.")
            break

        query_text = inp.strip()

        conv.append_message("user", query_text)

        chat_io.prompt_for_output("assistant")

        prompt = conv.get_prompt(add_generation_prompt=True)
        input_ids = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)["input_ids"].to(
            torch.cuda.current_device()
        )
        default_generate_kwargs = _set_default_generate_kwargs(model)
        model_kwargs.update(default_generate_kwargs)
        output_stream = generation_wrapper(
            model,
            input_ids,
            tokenizer,
            max_length=model_max_length,
            temperature=0.7,
            early_stopping=True,
            stop_token_ids=conversation_template_config["stop_ids"],
            **model_kwargs,
        )

        # print(f">>> Assistant:", end=" ")
        outputs = chat_io.stream_output(output_stream)

        conv.append_message("assistant", outputs.strip())

        with open("round.txt", mode="a", encoding="utf-8") as f:
            f.write("\n\n" + "=" * 10 + "\n")
            f.write(f"round {round}:\n{conv.save_prompt()}\n\n")
            f.write("=" * 10 + "\n")

        round += 1


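# A hypothetical invocation (script name and paths are placeholders):
#
#     python chat.py --model_path /path/to/model \
#         --conversation_template_config /path/to/conversation_template.json --io simple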
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str, default=None)
    parser.add_argument("--tokenizer_path", type=str, default=None)
    parser.add_argument("--conversation_template_config", type=str, default=None)
    parser.add_argument("--model_max_length", type=int, default=2048)
    parser.add_argument("--max_new_tokens", type=int, default=512)
    parser.add_argument("--io", type=str, default="rich", choices=["simple", "rich", "dummy"])
    args = parser.parse_args()
    main(args)