mirror of https://github.com/InternLM/InternLM

commit a184b29316 ("fix typos and try pass lint")
parent dbec726c62

The diff below touches a lint CI workflow, the top-level English and Chinese READMEs, the chat documentation, and the Streamlit web demo.
@@ -24,8 +24,7 @@ jobs:
       - name: lint-isort
         run: |
           pip install isort==5.12.0
-          isort --check --profile=black ./internlm/*
-          isort --check --profile=black ./train.py
+          isort --check --profile=black .
 
       - name: lint-black
         run: |
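The workflow change collapses two per-path isort invocations into a single repo-wide check. The same check can be reproduced locally before pushing; below is a minimal sketch using Python's standard subprocess module, assuming isort 5.12.0 is installed as in the workflow (everything outside the command itself is illustrative):

```python
# Local reproduction of the CI lint step above (pip install isort==5.12.0).
import subprocess
import sys

# Mirrors the consolidated invocation: isort --check --profile=black .
result = subprocess.run(
    ["isort", "--check", "--profile=black", "."],
    capture_output=True,
    text=True,
)
if result.returncode != 0:
    # In --check mode isort exits non-zero and reports the unsorted files.
    sys.stderr.write(result.stdout + result.stderr)
    sys.exit(result.returncode)
print("isort check passed")
```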
@@ -26,7 +26,7 @@
 [🤔Reporting Issues](https://github.com/InternLM/InternLM/issues/new)
 
 [English](./README.md) |
-[简体中文](./README_zh-CN.md) |
+[简体中文](./README_zh-CN.md)
 
 </div>
 
@@ -27,7 +27,7 @@
 [🤔提交反馈](https://github.com/InternLM/InternLM/issues/new)
 
 [English](./README.md) |
-[简体中文](./README_zh-CN.md) |
+[简体中文](./README_zh-CN.md)
 
 </div>
 
@@ -1,6 +1,6 @@
 # Chat
 
-English | [简体中文](lmdeploy_zh_zh-CN.md)
+English | [简体中文](./README_zh-CN.md)
 
 This document briefly shows how to use [Transformers](#import-from-transformers), [ModelScope](#import-from-modelscope), and [Web demos](#dialogue) to conduct inference with InternLM2-Chat.
 
@@ -1,6 +1,6 @@
 # 对话
 
-[English](lmdeploy.md) | 简体中文
+[English](./README.md) | 简体中文
 
 本文介绍采用 [Transformers](#import-from-transformers)、[ModelScope](#import-from-modelscope)、[Web demos](#dialogue)
 对 InternLM2-Chat 进行推理。
@@ -26,11 +26,11 @@ def on_btn_click():
 @st.cache_resource
 def load_model():
     model = (
-        AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
+        AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
         .to(torch.bfloat16)
         .cuda()
     )
-    tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
+    tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
     return model, tokenizer
 
 
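Beyond the Streamlit demo, the renamed checkpoint can be smoke-tested directly; the sketch below follows the usage pattern InternLM documents for its trust_remote_code model classes, which expose a .chat(tokenizer, query, history=...) helper (the query string and dtype handling are illustrative):

```python
# Minimal sketch: exercise internlm/internlm2-chat-7b outside Streamlit.
# Assumes a CUDA device; .chat() is provided by the model's remote code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
model = (
    AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
    .to(torch.bfloat16)
    .cuda()
    .eval()
)

response, history = model.chat(tokenizer, "hello", history=[])
print(response)
```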
@@ -46,9 +46,9 @@ def prepare_generation_config():
     return generation_config
 
 
-user_prompt = "<|User|>:{user}\n"
-robot_prompt = "<|Bot|>:{robot}<eoa>\n"
-cur_query_prompt = "<|User|>:{user}<eoh>\n<|Bot|>:"
+user_prompt = "[UNUSED_TOKEN_146]user\n{user}[UNUSED_TOKEN_145]\n"
+robot_prompt = "[UNUSED_TOKEN_146]assistant\n{robot}[UNUSED_TOKEN_145]\n"
+cur_query_prompt = "[UNUSED_TOKEN_146]user\n{user}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"
 
 
 def combine_history(prompt):
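The hunk ends at the signature of combine_history, which is where these templates are consumed. Below is a minimal sketch of how such a function could flatten the chat transcript, assuming messages live in st.session_state as dicts with "role" and "content" keys (that storage layout is an assumption; the real body is not part of this diff):

```python
# Sketch only: how the InternLM2 templates above could assemble one prompt.
# Assumes st.session_state.messages is a list of {"role", "content"} dicts.
def combine_history(prompt):
    total_prompt = ""
    for message in st.session_state.messages:
        if message["role"] == "user":
            total_prompt += user_prompt.format(user=message["content"])
        else:
            total_prompt += robot_prompt.format(robot=message["content"])
    # Append the current user turn and open an assistant turn to decode into.
    total_prompt += cur_query_prompt.format(user=prompt)
    return total_prompt
```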
@@ -73,10 +73,10 @@ def main():
     model, tokenizer = load_model()
     print("load model end.")
 
-    user_avator = "docs/imgs/user.png"
-    robot_avator = "docs/imgs/robot.png"
+    user_avator = "assets/user.png"
+    robot_avator = "assets/robot.png"
 
-    st.title("InternLM-Chat-7B")
+    st.title("InternLM2-Chat-7B")
 
     generation_config = prepare_generation_config()
 
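main() also consumes prepare_generation_config(), whose body falls outside this diff. A plausible shape, assuming Streamlit sidebar sliders feeding a transformers GenerationConfig (slider names, ranges, and defaults are all illustrative, not taken from the repo):

```python
# Hypothetical sketch of prepare_generation_config(); not part of this diff.
import streamlit as st
from transformers import GenerationConfig

def prepare_generation_config():
    with st.sidebar:
        max_length = st.slider("Max Length", min_value=8, max_value=32768, value=2048)
        top_p = st.slider("Top P", min_value=0.0, max_value=1.0, value=0.8, step=0.01)
        temperature = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.7, step=0.01)
    # GenerationConfig accepts arbitrary extra kwargs, so demo-specific fields are fine.
    return GenerationConfig(
        max_length=max_length, top_p=top_p, temperature=temperature, do_sample=True
    )
```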