commit 057afcce1946227aa8e3a38fdb8b3aafca1ea7f8
Author: duzx16 <904663169@qq.com>
Date:   Sat Jun 24 16:31:08 2023 +0800

    Init commit

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/cli_demo.py b/cli_demo.py
new file mode 100644
index 0000000..b749e31
--- /dev/null
+++ b/cli_demo.py
@@ -0,0 +1,59 @@
+import os
+import platform
+import signal
+from transformers import AutoTokenizer, AutoModel
+import readline
+
+tokenizer = AutoTokenizer.from_pretrained("/mnt/vepfs/workspace/zxdu/chatglm-6b-v2-dev", trust_remote_code=True)
+model = AutoModel.from_pretrained("/mnt/vepfs/workspace/zxdu/chatglm-6b-v2-dev", trust_remote_code=True, device='cuda')
+model = model.eval()
+
+os_name = platform.system()
+clear_command = 'cls' if os_name == 'Windows' else 'clear'
+stop_stream = False
+
+
+def build_prompt(history):
+    prompt = "欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序"
+    for query, response in history:
+        prompt += f"\n\n用户:{query}"
+        prompt += f"\n\nChatGLM-6B:{response}"
+    return prompt
+
+
+def signal_handler(signal, frame):
+    global stop_stream
+    stop_stream = True
+
+
+def main():
+    past_key_values, history = None, []
+    global stop_stream
+    print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序")
+    while True:
+        query = input("\n用户:")
+        if query.strip() == "stop":
+            break
+        if query.strip() == "clear":
+            past_key_values, history = None, []
+            os.system(clear_command)
+            print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序")
+            continue
+        count = 0
+        for response, history, past_key_values in model.stream_chat(tokenizer, query, history=history,
+                                                                    past_key_values=past_key_values):
+            if stop_stream:
+                stop_stream = False
+                break
+            else:
+                count += 1
+                if count % 8 == 0:
+                    os.system(clear_command)
+                    print(build_prompt(history), flush=True)
+                    signal.signal(signal.SIGINT, signal_handler)
+        os.system(clear_command)
+        print(build_prompt(history), flush=True)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/web_demo.py b/web_demo.py
new file mode 100644
index 0000000..17e85d2
--- /dev/null
+++ b/web_demo.py
@@ -0,0 +1,102 @@
+from transformers import AutoModel, AutoTokenizer
+import gradio as gr
+import mdtex2html
+
+tokenizer = AutoTokenizer.from_pretrained("/mnt/vepfs/workspace/zxdu/chatglm-6b-v2-dev", trust_remote_code=True)
+model = AutoModel.from_pretrained("/mnt/vepfs/workspace/zxdu/chatglm-6b-v2-dev", trust_remote_code=True, device='cuda')
+model = model.eval()
+
+"""Override Chatbot.postprocess"""
+
+
+def postprocess(self, y):
+    if y is None:
+        return []
+    for i, (message, response) in enumerate(y):
+        y[i] = (
+            None if message is None else mdtex2html.convert((message)),
+            None if response is None else mdtex2html.convert(response),
+        )
+    return y
+
+
+gr.Chatbot.postprocess = postprocess
+
+
+def parse_text(text):
+    """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
+    lines = text.split("\n")
+    lines = [line for line in lines if line != ""]
+    count = 0
+    for i, line in enumerate(lines):
+        if "```" in line:
+            count += 1
+            items = line.split('`')
+            if count % 2 == 1:
+                lines[i] = f'<pre><code class="language-{items[-1]}">'
+            else:
+                lines[i] = f'<br></code></pre>'
+        else:
+            if i > 0:
+                if count % 2 == 1:
+                    line = line.replace("`", "\`")
+                    line = line.replace("<", "&lt;")
+                    line = line.replace(">", "&gt;")
+                    line = line.replace(" ", "&nbsp;")
+                    line = line.replace("*", "&ast;")
+                    line = line.replace("_", "&lowbar;")
+                    line = line.replace("-", "&#45;")
+                    line = line.replace(".", "&#46;")
+                    line = line.replace("!", "&#33;")
+                    line = line.replace("(", "&#40;")
+                    line = line.replace(")", "&#41;")
+                    line = line.replace("$", "&#36;")
+                lines[i] = "<br>"+line
+    text = "".join(lines)
+    return text
+
+
+def predict(input, chatbot, max_length, top_p, temperature, history, past_key_values):
+    chatbot.append((parse_text(input), ""))
+    for response, history, past_key_values in model.stream_chat(tokenizer, input, history, past_key_values=past_key_values,
+                                                                max_length=max_length, top_p=top_p, temperature=temperature):
+        chatbot[-1] = (parse_text(input), parse_text(response))
+
+        yield chatbot, history, past_key_values
+
+
+def reset_user_input():
+    return gr.update(value='')
+
+
+def reset_state():
+    return [], [], None
+
+
+with gr.Blocks() as demo:
+    gr.HTML("""<h1 align="center">ChatGLM</h1>""")
+
+    chatbot = gr.Chatbot()
+    with gr.Row():
+        with gr.Column(scale=4):
+            with gr.Column(scale=12):
+                user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
+                    container=False)
+            with gr.Column(min_width=32, scale=1):
+                submitBtn = gr.Button("Submit", variant="primary")
+        with gr.Column(scale=1):
+            emptyBtn = gr.Button("Clear History")
+            max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
+            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
+            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
+
+    history = gr.State([])
+    past_key_values = gr.State(None)
+
+    submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history, past_key_values],
                    [chatbot, history, past_key_values], show_progress=True)
+    submitBtn.click(reset_user_input, [], [user_input])
+
+    emptyBtn.click(reset_state, outputs=[chatbot, history, past_key_values], show_progress=True)
+
+demo.queue().launch(share=False, inbrowser=True)