From fdc3e5646a38b10fabd7f23384cdd283b9ec95bf Mon Sep 17 00:00:00 2001
From: OedoSoldier <31711261+OedoSoldier@users.noreply.github.com>
Date: Tue, 14 Mar 2023 20:11:39 +0800
Subject: [PATCH 1/2] Fix `clear` command performance in Windows

---
 cli_demo.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/cli_demo.py b/cli_demo.py
index 07cfd6f..7b76e01 100644
--- a/cli_demo.py
+++ b/cli_demo.py
@@ -1,10 +1,13 @@
 import os
+import platform
 from transformers import AutoTokenizer, AutoModel
 
-tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+tokenizer = AutoTokenizer.from_pretrained("chatglm-6b", trust_remote_code=True)
+model = AutoModel.from_pretrained("chatglm-6b", trust_remote_code=True).half().cuda()
 model = model.eval()
 
+os_name = platform.system()
+
 history = []
 print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序")
 while True:
@@ -13,8 +16,8 @@ while True:
         break
     if query == "clear":
         history = []
-        os.system('clear')
+        command = 'cls' if os_name == 'Windows' else 'clear'
+        os.system(command)
         continue
     response, history = model.chat(tokenizer, query, history=history)
     print(f"ChatGLM-6B:{response}")
-

From fb94fe80a03271a8cdf79ee99d62fe943729ce81 Mon Sep 17 00:00:00 2001
From: OedoSoldier <31711261+OedoSoldier@users.noreply.github.com>
Date: Tue, 14 Mar 2023 20:12:36 +0800
Subject: [PATCH 2/2] Fix typo

---
 cli_demo.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cli_demo.py b/cli_demo.py
index 7b76e01..ccdb26b 100644
--- a/cli_demo.py
+++ b/cli_demo.py
@@ -2,8 +2,8 @@ import os
 import platform
 from transformers import AutoTokenizer, AutoModel
 
-tokenizer = AutoTokenizer.from_pretrained("chatglm-6b", trust_remote_code=True)
-model = AutoModel.from_pretrained("chatglm-6b", trust_remote_code=True).half().cuda()
+tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
 model = model.eval()
 
 os_name = platform.system()