From b9ce4fd8b0cb84111b730513fca35655fec8c501 Mon Sep 17 00:00:00 2001 From: braisedpork1964 <497494458@qq.com> Date: Mon, 13 Jan 2025 07:57:45 +0000 Subject: [PATCH] update identity recognition --- chat/web_demo.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/chat/web_demo.py b/chat/web_demo.py index ef8c43d..6ee1aef 100644 --- a/chat/web_demo.py +++ b/chat/web_demo.py @@ -226,8 +226,12 @@ def combine_history(prompt, deepthink=False, start=0, stop=None): else: messages.append(message) meta_instruction = ( - ( - """You are an expert mathematician with extensive experience in mathematical competitions. You approach problems through systematic thinking and rigorous reasoning. When solving problems, follow these thought processes: + 'You are InternLM (书生·浦语), a helpful, honest, ' + 'and harmless AI assistant developed by Shanghai ' + 'AI Laboratory (上海人工智能实验室).' + ) + if deepthink: + meta_instruction += """\nYou are an expert mathematician with extensive experience in mathematical competitions. You approach problems through systematic thinking and rigorous reasoning. When solving problems, follow these thought processes: ## Deep Understanding Take time to fully comprehend the problem before attempting a solution. Consider: - What is the real question being asked? @@ -272,14 +276,6 @@ When you're ready, present your complete solution with: - Thorough verification Focus on clear, logical progression of ideas and thorough explanation of your mathematical reasoning. Provide answers in the same language as the user asking the question, repeat the final answer using a '\\boxed{}' without any units, you have [[8192]] tokens to complete the answer. """ - ) - if deepthink - else ( - 'You are InternLM (书生·浦语), a helpful, honest, ' - 'and harmless AI assistant developed by Shanghai ' - 'AI Laboratory (上海人工智能实验室).' 
- ) - ) total_prompt = f'<|im_start|>system\n{meta_instruction}<|im_end|>\n' for message in messages: cur_content = message['content'] @@ -346,6 +342,7 @@ def main(): message_placeholder.markdown(postprocess(cur_response) + '▌') message_placeholder.markdown(postprocess(cur_response)) deepthink_message['content'] = cur_response + torch.cuda.empty_cache() else: st.markdown(postprocess(deepthink_message['content'])) else: