mirror of https://github.com/THUDM/ChatGLM-6B
Merge branch 'dev' into main
commit 91e4f29563
web_demo3.py (19 changed lines)
@@ -7,10 +7,9 @@ tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=
 model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
 model = model.eval()
 
-# MAX_TURNS = 20
-# MAX_BOXES = MAX_TURNS * 2
 
 """Override Chatbot.postprocess"""
 
 
 def postprocess(self, y):
     if y is None:
         return []
@@ -20,6 +19,8 @@ def postprocess(self, y):
             None if response is None else mdtex2html.convert(response),
         )
     return y
 
 
 gr.Chatbot.postprocess = postprocess
 
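The two hunks above only touch the edges of the Markdown-rendering override; a minimal reconstruction of the full function they surround, assuming the message branch mirrors the response branch that is visible in the diff:

import gradio as gr
import mdtex2html

def postprocess(self, y):
    # y is the chat history: a list of (message, response) tuples.
    if y is None:
        return []
    for i, (message, response) in enumerate(y):
        # Render both sides of the turn from Markdown (plus LaTeX via mdtex2html) to HTML;
        # only the response branch appears in the hunk, the message branch is assumed symmetric.
        y[i] = (
            None if message is None else mdtex2html.convert(message),
            None if response is None else mdtex2html.convert(response),
        )
    return y

# Monkey-patch so every gr.Chatbot instance renders the converted HTML.
gr.Chatbot.postprocess = postprocess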
@@ -61,8 +62,10 @@ def predict(input, chatbot, max_length, top_p, temperature, history):
     for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
                                                temperature=temperature):
         chatbot[-1] = (parse_text(input), parse_text(response))
 
         yield chatbot, history
 
 
 def reset_user_input():
     return gr.update(value='')
 
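The hunk above sits inside a generator: every partial response streamed by model.stream_chat overwrites the last chatbot turn and is yielded so Gradio can repaint the Chatbot and update the history State incrementally. A sketch of the enclosing function, assuming the user turn is appended before streaming (that line is outside the hunk):

def predict(input, chatbot, max_length, top_p, temperature, history):
    # Show the new user message right away; the response slot fills in while streaming.
    chatbot.append((parse_text(input), ""))
    for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length,
                                               top_p=top_p, temperature=temperature):
        # Replace the last turn with the partial response generated so far.
        chatbot[-1] = (parse_text(input), parse_text(response))

        # Each yield pushes the updated chat and history back to the bound outputs.
        yield chatbot, history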
@@ -70,6 +73,7 @@ def reset_user_input():
 def reset_state():
     return [], []
 
 
 with gr.Blocks() as demo:
     gr.HTML("""<h1 align="center">ChatGLM</h1>""")
 
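The next hunk binds events to widgets that are created between these two hunks but do not appear in the diff. A plausible layout sketch, using only the names referenced by the bindings below (the Row/Column arrangement and slider ranges are illustrative assumptions):

with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">ChatGLM</h1>""")

    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            # Prompt box and submit button; exact styling is not part of this diff.
            user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10)
            submitBtn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            emptyBtn = gr.Button("Clear History")
            # Generation controls passed into predict(); ranges and defaults are assumptions.
            max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)

    history = gr.State([])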
@@ -90,13 +94,14 @@ with gr.Blocks() as demo:
     history = gr.State([])
 
-    user_input.submit(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], show_progress=True)
+    user_input.submit(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history],
+                      show_progress=True)
     user_input.submit(reset_user_input, [], [user_input])
 
-    submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], show_progress=True)
+    submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history],
+                    show_progress=True)
     submitBtn.click(reset_user_input, [], [user_input])
 
     emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)
 
-demo.queue().launch(share=True, inbrowser=True)
+demo.queue().launch(share=False, inbrowser=True)
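The final change flips share=True to share=False, so launch() no longer requests a temporary public *.gradio.live URL and the demo is reachable only from the machine it runs on. If it should instead be reachable on a local network without a public share link, Gradio's launch() also accepts server_name/server_port; an illustrative alternative, not part of this commit:

# Bind to all interfaces on a fixed port instead of creating a public share link.
demo.queue().launch(share=False, inbrowser=True, server_name="0.0.0.0", server_port=7860)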