Fixed the issue where the HF model spontaneously conducted multiple rounds of Q&A and the stream_chat method generated garbled characters

Signed-off-by: daijun1 <daijun1@eccom.com.cn>
pull/560/head
daijun1 2023-12-26 16:32:33 +08:00
parent ac7509389b
commit 97e7d03d09
1 changed file with 9 additions and 1 deletion

View File

@ -844,6 +844,7 @@ class InternLMForCausalLM(InternLMPreTrainedModel):
self.query = query
self.history = history
self.response = ""
self.chche = []
self.received_inputs = False
self.queue.put((self.response, history + [(self.query, self.response)]))
@ -858,11 +859,18 @@ class InternLMForCausalLM(InternLMPreTrainedModel):
self.received_inputs = True
return
token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
self.chche.extend(value.tolist())
token = self.tokenizer.decode(self.chche, skip_special_tokens=True)
if " " in token and len(token) <= 5:
return
if token.strip() != "<eoa>":
self.response = self.response + token
history = self.history + [(self.query, self.response)]
self.queue.put((self.response, history))
self.chche = []
else:
self.end()
def end(self):
    # Terminate the stream: enqueue a None sentinel so the consumer
    # draining self.queue knows generation is finished.
    self.queue.put(None)