ChatGLM-6B/web_demo.py

import gradio as gr
from utils import load_model_and_tokenizer
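
# Load the ChatGLM-6B model and tokenizer via the repo's utils helper;
# num_gpus=1 runs the model on a single GPU.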
model, tokenizer = load_model_and_tokenizer("THUDM/chatglm-6b", num_gpus=1)
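
# The transcript is capped at MAX_TURNS exchanges; each exchange occupies two
# output boxes (one for the query, one for the reply).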
MAX_TURNS = 20
MAX_BOXES = MAX_TURNS * 2
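

# Generator callback: streams partial replies from the model and maps the
# running history onto the pre-created output boxes.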
def predict(input, max_length, top_p, temperature, history=None):
    if history is None:
        history = []
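    # stream_chat yields (partial_response, updated_history) pairs, so the UI
    # refreshes while the reply is still being generated.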
    for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
                                               temperature=temperature):
        updates = []
        for query, response in history:
            updates.append(gr.update(visible=True, value="用户:" + query))
            updates.append(gr.update(visible=True, value="ChatGLM-6B:" + response))
        if len(updates) < MAX_BOXES:
            updates = updates + [gr.Textbox.update(visible=False)] * (MAX_BOXES - len(updates))
        yield [history] + updates
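

# Build the Gradio UI: a pool of transcript boxes, a text input, sliders for
# the generation parameters, and a Generate button.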
with gr.Blocks() as demo:
    state = gr.State([])
    text_boxes = []
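    # Pre-create MAX_BOXES hidden Markdown boxes; even slots show the user's
    # question ("提问"), odd slots show the model's reply ("回复").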
    for i in range(MAX_BOXES):
        if i % 2 == 0:
            text_boxes.append(gr.Markdown(visible=False, label="提问:"))
        else:
            text_boxes.append(gr.Markdown(visible=False, label="回复:"))

    with gr.Row():
        with gr.Column(scale=4):
            txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter", lines=11).style(
                container=False)
        with gr.Column(scale=1):
            max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
    button = gr.Button("Generate")
    button.click(predict, [txt, max_length, top_p, temperature, state], [state] + text_boxes)
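
# queue() is required for streaming generator outputs; share=False disables the
# public Gradio link and inbrowser=True opens the demo in a local browser tab.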
demo.queue().launch(share=False, inbrowser=True)