mirror of https://github.com/THUDM/ChatGLM-6B
Merge pull request #19 from OedoSoldier/main
Fix `clear` command behavior on Windows (branch: pull/41/head)
commit
afee32f2b1
|
@ -1,10 +1,13 @@
|
|||
import os
import platform

from transformers import AutoTokenizer, AutoModel

# Load the ChatGLM-6B tokenizer and model from the Hugging Face hub.
# trust_remote_code=True is required because this model ships its own
# modeling code; .half().cuda() runs inference in fp16 on the GPU.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()  # inference only: disable dropout / training behavior

# Used later to pick the console-clear command ('cls' on Windows, 'clear' elsewhere).
os_name = platform.system()

# Conversation history; fed back into model.chat() on every turn of the REPL below.
history = []
print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序")
while True:
|
||||
|
@ -13,8 +16,8 @@ while True:
|
|||
break
|
||||
if query == "clear":
|
||||
history = []
|
||||
os.system('clear')
|
||||
command = 'cls' if os_name == 'Windows' else 'clear'
|
||||
os.system(command)
|
||||
continue
|
||||
response, history = model.chat(tokenizer, query, history=history)
|
||||
print(f"ChatGLM-6B:{response}")
|
||||
|
||||
|
|
Loading…
Reference in New Issue