From a51bdd761a58a13f57e513d386a0b5ba50c105c5 Mon Sep 17 00:00:00 2001
From: Yif-Liu-Github <54025962+Yif-Liu-Github@users.noreply.github.com>
Date: Mon, 21 Aug 2023 23:59:34 +0800
Subject: [PATCH] Update openai_api.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use the chunk.model_dump_json method in place of chunk.json to fix the
error "dumps_kwargs keyword arguments are no longer supported".
---
 openai_api.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/openai_api.py b/openai_api.py
index 7225562..a88e491 100644
--- a/openai_api.py
+++ b/openai_api.py
@@ -135,7 +135,7 @@ async def predict(query: str, history: List[List[str]], model_id: str):
         finish_reason=None
     )
     chunk = ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk")
-    yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False))
+    yield "{}".format(chunk.model_dump_json(exclude_unset=True, exclude_none=True))

     current_length = 0

@@ -152,7 +152,7 @@ async def predict(query: str, history: List[List[str]], model_id: str):
             finish_reason=None
         )
         chunk = ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk")
-        yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False))
+        yield "{}".format(chunk.model_dump_json(exclude_unset=True, exclude_none=True))


     choice_data = ChatCompletionResponseStreamChoice(
@@ -161,14 +161,14 @@
         finish_reason="stop"
     )
     chunk = ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk")
-    yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False))
+    yield "{}".format(chunk.model_dump_json(exclude_unset=True, exclude_none=True))
     yield '[DONE]'


 if __name__ == "__main__":
-    tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
-    model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).cuda()
+    tokenizer = AutoTokenizer.from_pretrained("chatglm2-6b", trust_remote_code=True)
+    model = AutoModel.from_pretrained("chatglm2-6b", trust_remote_code=True).cuda()
     # Multi-GPU support: use the two lines below in place of the line above, and set num_gpus to your actual number of GPUs
     # from utils import load_model_on_gpus
     # model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2)
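
For reference, below is a minimal standalone sketch of the Pydantic v1 -> v2 serialization change this patch works around. The Delta model is a hypothetical stand-in rather than a class from openai_api.py, and it assumes pydantic >= 2 is installed:

# Minimal sketch of the Pydantic v2 behavior behind this patch.
# "Delta" is a hypothetical stand-in for the response models in openai_api.py.
from typing import Optional

from pydantic import BaseModel


class Delta(BaseModel):
    role: str
    content: Optional[str] = None


delta = Delta(role="assistant")

# Pydantic v1 forwarded extra keyword arguments to json.dumps:
#     delta.json(exclude_unset=True, ensure_ascii=False)
# Under Pydantic v2 that same call raises:
#     TypeError: `dumps_kwargs` keyword arguments are no longer supported.

# The Pydantic v2 replacement used by this patch:
print(delta.model_dump_json(exclude_unset=True, exclude_none=True))
# -> {"role":"assistant"}

Dropping ensure_ascii=False should be safe here: Pydantic v2's model_dump_json writes non-ASCII characters (such as Chinese model output) directly as UTF-8 instead of escaping them, so the old flag has no v2 counterpart and none is needed.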