mirror of https://github.com/hpcaitech/ColossalAI
[Fix] Remove obsolete files - inference (#5650)
parent a8fd3b0342
commit f342a93871
@@ -1,59 +0,0 @@
import argparse
import os

import torch
from datasets import load_dataset
from transformers import LlamaTokenizer

from colossalai.inference.quant.smoothquant.models.llama import SmoothLlamaForCausalLM


def build_model_and_tokenizer(model_name):
    tokenizer = LlamaTokenizer.from_pretrained(model_name, model_max_length=512)
    kwargs = {"torch_dtype": torch.float16, "device_map": "sequential"}
    model = SmoothLlamaForCausalLM.from_pretrained(model_name, **kwargs)
    model = model.to(torch.float32)
    return model, tokenizer


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-name", type=str, help="model name")
    parser.add_argument(
        "--output-path",
        type=str,
        help="where to save the checkpoint",
    )
    parser.add_argument(
        "--dataset-path",
        type=str,
        help="location of the calibration dataset",
    )
    parser.add_argument("--num-samples", type=int, default=10)
    parser.add_argument("--seq-len", type=int, default=512)
    args = parser.parse_args()
    return args


@torch.no_grad()
def main():
    args = parse_args()
    model_path = args.model_name
    dataset_path = args.dataset_path
    output_path = args.output_path
    num_samples = args.num_samples
    seq_len = args.seq_len

    model, tokenizer = build_model_and_tokenizer(model_path)
    if not os.path.exists(dataset_path):
        raise FileNotFoundError(f"Cannot find the dataset at {args.dataset_path}")
    dataset = load_dataset("json", data_files=dataset_path, split="train")

    model.quantized(tokenizer, dataset, num_samples=num_samples, seq_len=seq_len)
    model = model.cuda()

    model.save_quantized(output_path, model_basename="llama-7b")


if __name__ == "__main__":
    main()
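For reference, the --dataset-path argument consumed by the removed script above points at a JSON/JSON Lines file that datasets.load_dataset("json", ...) can read. The sketch below (not part of the removed file) shows one way to produce and reload such a calibration file; the file name and the per-record "text" field are illustrative assumptions, not requirements imposed by ColossalAI.

import json

from datasets import load_dataset

# Hypothetical calibration samples; the "text" field name is an assumption
# chosen for illustration.
samples = [
    {"text": "The quick brown fox jumps over the lazy dog."},
    {"text": "SmoothQuant calibrates activation ranges on a handful of prompts."},
]

# Write one JSON object per line (JSON Lines), which load_dataset("json", ...) accepts.
with open("calib_dataset.json", "w") as f:
    for sample in samples:
        f.write(json.dumps(sample) + "\n")

# Mirrors the load performed in main() above.
dataset = load_dataset("json", data_files="calib_dataset.json", split="train")
print(dataset)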
@@ -1,98 +0,0 @@
import argparse

import torch
import torch.distributed as dist
from transformers import LlamaForCausalLM, LlamaTokenizer

import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.inference import InferenceEngine
from colossalai.testing import spawn

INPUT_TEXTS = [
    "What is the longest river in the world?",
    "Explain the difference between process and thread in computer science.",
]


def run_inference(args):
    llama_model_path = args.model_path
    llama_tokenize_path = args.tokenizer_path or args.model_path

    max_input_len = args.max_input_len
    max_output_len = args.max_output_len
    max_batch_size = args.batch_size
    micro_batch_size = args.micro_batch_size
    tp_size = args.tp_size
    pp_size = args.pp_size
    rank = dist.get_rank()

    tokenizer = LlamaTokenizer.from_pretrained(llama_tokenize_path, padding_side="left")
    tokenizer.pad_token_id = tokenizer.eos_token_id

    if args.quant is None:
        model = LlamaForCausalLM.from_pretrained(llama_model_path, pad_token_id=tokenizer.pad_token_id)
    elif args.quant == "gptq":
        from auto_gptq import AutoGPTQForCausalLM

        model = AutoGPTQForCausalLM.from_quantized(
            llama_model_path, inject_fused_attention=False, device=torch.cuda.current_device()
        )
    elif args.quant == "smoothquant":
        from colossalai.inference.quant.smoothquant.models.llama import SmoothLlamaForCausalLM

        model = SmoothLlamaForCausalLM.from_quantized(llama_model_path, model_basename=args.smoothquant_base_name)
        model = model.cuda()

    engine = InferenceEngine(
        tp_size=tp_size,
        pp_size=pp_size,
        model=model,
        max_input_len=max_input_len,
        max_output_len=max_output_len,
        max_batch_size=max_batch_size,
        micro_batch_size=micro_batch_size,
        quant=args.quant,
        dtype=args.dtype,
    )

    inputs = tokenizer(INPUT_TEXTS, return_tensors="pt", padding="longest", max_length=max_input_len, truncation=True)
    inputs = {k: v.to(get_accelerator().get_current_device()) for k, v in inputs.items()}
    outputs = engine.generate(inputs)

    if rank == 0:
        output_texts = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        for input_text, output_text in zip(INPUT_TEXTS, output_texts):
            print(f"Input: {input_text}")
            print(f"Output: {output_text}")


def run_tp_pipeline_inference(rank, world_size, port, args):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_inference(args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--model_path", type=str, help="Model path", required=True)
    parser.add_argument("-i", "--input", default="What is the longest river in the world?")
    parser.add_argument("-t", "--tokenizer_path", type=str, help="Tokenizer path", default=None)
    parser.add_argument(
        "-q",
        "--quant",
        type=str,
        choices=["gptq", "smoothquant"],
        default=None,
        help="quantization type: 'gptq' or 'smoothquant'",
    )
    parser.add_argument("--smoothquant_base_name", type=str, default=None, help="smoothquant base name")
    parser.add_argument("--tp_size", type=int, default=1, help="Tensor parallel size")
    parser.add_argument("--pp_size", type=int, default=1, help="Pipeline parallel size")
    parser.add_argument("-b", "--batch_size", type=int, default=4, help="Maximum batch size")
    parser.add_argument("--max_input_len", type=int, default=2048, help="Maximum input length")
    parser.add_argument("--max_output_len", type=int, default=64, help="Maximum output length")
    parser.add_argument("--micro_batch_size", type=int, default=1, help="Micro batch size")
    parser.add_argument("--dtype", default="fp16", type=str)

    args = parser.parse_args()
    spawn(run_tp_pipeline_inference, nprocs=args.tp_size * args.pp_size, args=args)
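For comparison with the removed example above, the snippet below (not part of the removed file) sketches the equivalent single-GPU generation flow using plain transformers, bypassing InferenceEngine, tensor parallelism, and pipeline parallelism entirely; the model path is a placeholder.

import torch
from transformers import LlamaForCausalLM, LlamaTokenizer

model_path = "/path/to/llama-7b"  # placeholder; substitute a real LLaMA checkpoint

# Left padding and pad_token_id handling mirror the removed example above.
tokenizer = LlamaTokenizer.from_pretrained(model_path, padding_side="left")
tokenizer.pad_token_id = tokenizer.eos_token_id

model = LlamaForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16).cuda()

prompts = ["What is the longest river in the world?"]
inputs = tokenizer(prompts, return_tensors="pt", padding="longest", truncation=True).to("cuda")

with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=64)

print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])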
@@ -1,144 +0,0 @@
import pytest
import torch
from packaging import version

try:
    import triton  # noqa: the import guarded by this try/except

    HAS_TRITON = True
except ImportError:
    HAS_TRITON = False
    print("please install triton from https://github.com/openai/triton")

try:
    from auto_gptq.modeling._utils import autogptq_post_init
    from auto_gptq.utils.import_utils import dynamically_import_QuantLinear
    from exllama_kernels import prepare_buffers, set_tuning_params

    from colossalai.inference.quant.gptq import CaiQuantLinear

    HAS_AUTO_GPTQ = True
except:
    HAS_AUTO_GPTQ = False
    print("please install AutoGPTQ from https://github.com/PanQiWei/AutoGPTQ")

import warnings

HAS_GPTQ_CUDA = False
try:
    from colossalai.kernel.op_builder.gptq import GPTQBuilder

    gptq_cuda = GPTQBuilder().load()
    HAS_GPTQ_CUDA = True
except ImportError:
    warnings.warn("CUDA gptq is not installed")
    HAS_GPTQ_CUDA = False

TRITON_CUDA_SUPPORT = version.parse(torch.version.cuda) > version.parse("11.4")

max_inner_outer_dim = 1
max_input_len = 1
max_dq_buffer_size = 1
gptq_temp_dq_buffer = None
gptq_temp_state_buffer = None


def init_buffer(cai_linear, use_act_order=False):
    global max_dq_buffer_size
    global max_input_len
    global max_inner_outer_dim
    global gptq_temp_dq_buffer
    global gptq_temp_state_buffer

    max_dq_buffer_size = max(max_dq_buffer_size, cai_linear.qweight.numel() * 8)

    if use_act_order:
        max_inner_outer_dim = max(max_inner_outer_dim, cai_linear.infeatures, cai_linear.outfeatures)

    if use_act_order:
        max_input_len = 4096
    # The temp_state buffer is required to reorder X in the act-order case.
    # The temp_dq buffer is required to dequantize weights when using cuBLAS, typically for the prefill.
    gptq_temp_state_buffer = torch.zeros(
        (max_input_len, max_inner_outer_dim), dtype=torch.float16, device=torch.cuda.current_device()
    )
    gptq_temp_dq_buffer = torch.zeros((1, max_dq_buffer_size), dtype=torch.float16, device=torch.cuda.current_device())

    gptq_cuda.prepare_buffers(torch.device(torch.cuda.current_device()), gptq_temp_state_buffer, gptq_temp_dq_buffer)
    # Using the default from exllama repo here.
    matmul_recons_thd = 8
    matmul_fused_remap = False
    matmul_no_half2 = False
    gptq_cuda.set_tuning_params(matmul_recons_thd, matmul_fused_remap, matmul_no_half2)


@pytest.mark.skipif(
    not TRITON_CUDA_SUPPORT or not HAS_TRITON or not HAS_AUTO_GPTQ,
    reason="triton requires a CUDA version higher than 11.4, or auto-gptq is not installed",
)
def test_gptq_linear():
    infeature = 1024
    outfeature = 1024
    group_size = 128
    wbits = 4

    inps = torch.ones(1, 1, infeature).to(torch.float16).to(torch.cuda.current_device())
    batch_inps = torch.randn(1, 16, infeature).to(torch.float16).to(torch.cuda.current_device())

    device = torch.device("cuda:0")

    linear_class = dynamically_import_QuantLinear(use_triton=False, desc_act=False, group_size=group_size, bits=wbits)

    linear = linear_class(
        bits=4,
        group_size=group_size,
        infeatures=infeature,
        outfeatures=outfeature,
        bias=False,
    )

    torch.manual_seed(42)

    linear.qweight = torch.randint(-100, 100, size=linear.qweight.shape, dtype=torch.int32)
    linear.scales = linear.scales + 0.002

    linear = linear.to(device)

    cai_linear = CaiQuantLinear(wbits, group_size, infeature, outfeature, True)
    cai_linear.qweight.data.copy_(linear.qweight)
    cai_linear.scales = cai_linear.scales + 0.002
    cai_linear = cai_linear.to(device)

    linear = autogptq_post_init(linear, use_act_order=False)

    max_inner_outer_dim = max(infeature, outfeature)
    max_dq_buffer_size = linear.infeatures * linear.outfeatures
    max_input_len = 2048
    buffers = {
        "temp_state": torch.zeros((max_input_len, max_inner_outer_dim), dtype=torch.float16, device=device),
        "temp_dq": torch.zeros((1, max_dq_buffer_size), dtype=torch.float16, device=device),
    }

    prepare_buffers(device, buffers["temp_state"], buffers["temp_dq"])

    # Using the default from exllama repo here.
    matmul_recons_thd = 8
    matmul_fused_remap = False
    matmul_no_half2 = False
    set_tuning_params(matmul_recons_thd, matmul_fused_remap, matmul_no_half2)

    with torch.no_grad():
        gptq_out = linear(inps)
        batch_gptq_out = linear(batch_inps)
        torch.cuda.synchronize()
        cai_out = cai_linear(inps)
        torch.cuda.synchronize()

        batch_cai_out = cai_linear(batch_inps)
        torch.cuda.synchronize()

    assert torch.allclose(cai_out, gptq_out, rtol=1e-01, atol=1e-01)
    assert torch.allclose(batch_cai_out, batch_gptq_out, rtol=1e-01, atol=1e-01)


if __name__ == "__main__":
    test_gptq_linear()
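A side note on the buffer sizing in the removed test above: with 4-bit quantization (wbits = 4), eight weight values are packed into each 32-bit element of qweight, which is why init_buffer sizes the dequantize buffer as qweight.numel() * 8. The sketch below (not the repository's actual packing routine) illustrates that packing with plain Python integers.

# Eight 4-bit values (0..15) packed into a single 32-bit word, low nibble first.
values = [3, 7, 1, 15, 0, 9, 4, 11]

packed = 0
for i, v in enumerate(values):
    packed |= v << (4 * i)

# Unpack by shifting each nibble back down and masking; the round trip is lossless.
unpacked = [(packed >> (4 * i)) & 0xF for i in range(8)]
assert unpacked == values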