mirror of https://github.com/InternLM/InternLM
Update modeling_internlm.py
Fixed the issue where the HF model spontaneously conducted multiple rounds of Q&A and the stream_chat method generated garbled characters (pull/560/head)
parent
97e7d03d09
commit
508711cc97
|
@@ -861,7 +861,7 @@ class InternLMForCausalLM(InternLMPreTrainedModel):
|
|||
|
||||
self.chche.extend(value.tolist())
|
||||
token = self.tokenizer.decode(self.chche, skip_special_tokens=True)
|
||||
if " " in token and len(token) <= 5:
|
||||
if "�" in token and len(token) <= 5:
|
||||
return
|
||||
|
||||
if token.strip() != "<eoa>":
|
||||
|
|
Loading…
Reference in New Issue