mirror of https://github.com/InternLM/InternLM

commit a184b29316 (parent dbec726c62): fix typos and try pass lint
GitHub Actions lint workflow: the two per-path isort invocations are collapsed into a single repository-wide check; the flake8, black, and pylint steps are unchanged context.

@@ -12,31 +12,30 @@ jobs:
   lint-check:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v3

     - name: lint-flake8
       run: |
         pip install flake8==v3.8.4
         FLAKE_DISABLE_LIST="F403,F405,W504,W503,E203"
         flake8 --max-line-length=120 --ignore=$FLAKE_DISABLE_LIST ./internlm/*
         flake8 --max-line-length=120 --ignore=$FLAKE_DISABLE_LIST ./train.py

     - name: lint-isort
       run: |
         pip install isort==5.12.0
-        isort --check --profile=black ./internlm/*
-        isort --check --profile=black ./train.py
+        isort --check --profile=black .

     - name: lint-black
       run: |
         pip install black==22.8.0
         BLACK_EXCLUDE_SETTINGS='\.venv/|\.local/|\.cache/|\.git/'
         black --line-length=120 --check --exclude $BLACK_EXCLUDE_SETTINGS ./internlm/*
         black --line-length=120 --check --exclude $BLACK_EXCLUDE_SETTINGS ./train.py

     - name: lint-pylint
       run: |
         pip install pylint==v2.17.2
         PYLINT_DISABLE_LIST="C0114,C0415,W0212,W0235,W0238,W0621,C0103,R1735,C2801,E0402,C0412,W0719,R1728,W1514,W0718,W0105,W0707,C0209,W0703,W1203"
         pylint --rcfile .pylintrc --disable=$PYLINT_DISABLE_LIST ./internlm/*
         pylint --rcfile .pylintrc --disable=$PYLINT_DISABLE_LIST ./train.py
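Since the isort step is the only real change in this hunk, a contributor may want to reproduce the same gate locally before pushing. Below is a minimal sketch using isort's public Python API (isort.check_file); the file-walking loop and the PASS/FAIL summary are our additions for illustration, not part of the workflow.

# Hedged sketch: reproduce the CI isort check locally.
# Assumes isort==5.12.0, the version pinned in the workflow above.
from pathlib import Path

import isort

# The updated workflow step targets the repository root ("."),
# so walk every .py file under it.
results = [
    isort.check_file(str(path), profile="black", show_diff=True)
    for path in Path(".").rglob("*.py")
]
print("isort: PASS" if all(results) else f"isort: FAIL on {results.count(False)} file(s)")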
Project README (English): the stray trailing pipe after the last navigation link is dropped.

@@ -26,7 +26,7 @@
 [🤔Reporting Issues](https://github.com/InternLM/InternLM/issues/new)

 [English](./README.md) |
-[简体中文](./README_zh-CN.md) |
+[简体中文](./README_zh-CN.md)

 </div>

Project README (Chinese): the same trailing-pipe fix.

@@ -27,7 +27,7 @@
 [🤔提交反馈](https://github.com/InternLM/InternLM/issues/new)

 [English](./README.md) |
-[简体中文](./README_zh-CN.md) |
+[简体中文](./README_zh-CN.md)

 </div>

Chat documentation (English): the language-switch link mistakenly pointed at lmdeploy_zh_zh-CN.md; it now targets the Chinese counterpart of this page.

@@ -1,6 +1,6 @@
 # Chat

-English | [简体中文](lmdeploy_zh_zh-CN.md)
+English | [简体中文](./README_zh-CN.md)

 This document briefly shows how to use [Transformers](#import-from-transformers), [ModelScope](#import-from-modelscope), and [Web demos](#dialogue) to conduct inference with InternLM2-Chat.

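For orientation, the Transformers route this document refers to looks roughly like the following. This is a hedged sketch based on the model card's published usage: model.chat comes from the model's remote code (hence trust_remote_code=True), not from Transformers itself, and a CUDA device is assumed.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the chat model the same way the web demo below does.
tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "internlm/internlm2-chat-7b", torch_dtype=torch.bfloat16, trust_remote_code=True
).cuda()
model = model.eval()

# `chat` wraps prompt formatting and decoding for single-turn use.
response, history = model.chat(tokenizer, "hello", history=[])
print(response)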
Chat documentation (Chinese): the mirrored link fix.

@@ -1,6 +1,6 @@
 # 对话

-[English](lmdeploy.md) | 简体中文
+[English](./README.md) | 简体中文

 本文介绍采用 [Transformers](#import-from-transformers)、[ModelScope](#import-from-modelscope)、[Web demos](#dialogue)
 对 InternLM2-Chat 进行推理。
Streamlit web demo: the model and tokenizer IDs move from the first-generation internlm-chat-7b to internlm2-chat-7b.

@@ -26,11 +26,11 @@ def on_btn_click():
 @st.cache_resource
 def load_model():
     model = (
-        AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
+        AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
         .to(torch.bfloat16)
         .cuda()
     )
-    tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
+    tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
     return model, tokenizer

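A brief aside on the decorator this hunk keeps: Streamlit re-executes the entire script on every user interaction, so without caching the 7B weights would be reloaded on each message. st.cache_resource is the real Streamlit API for this; the function body below is a placeholder sketch, not the demo's code.

import streamlit as st

@st.cache_resource  # runs once per process; later reruns reuse the returned objects
def load_model():
    # heavy one-time initialization would go here (weights, .cuda(), tokenizer)
    model, tokenizer = object(), object()  # placeholders for the sketch
    return model, tokenizer

model, tokenizer = load_model()  # effectively free after the first rerun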
The prompt templates switch from the first-generation chat markup (<|User|>, <|Bot|>, <eoh>, <eoa>) to the InternLM2 format, which opens each turn with [UNUSED_TOKEN_146] plus a role name and closes it with [UNUSED_TOKEN_145].

@@ -46,9 +46,9 @@ def prepare_generation_config():
     return generation_config


-user_prompt = "<|User|>:{user}\n"
-robot_prompt = "<|Bot|>:{robot}<eoa>\n"
-cur_query_prompt = "<|User|>:{user}<eoh>\n<|Bot|>:"
+user_prompt = "[UNUSED_TOKEN_146]user\n{user}[UNUSED_TOKEN_145]\n"
+robot_prompt = "[UNUSED_TOKEN_146]assistant\n{robot}[UNUSED_TOKEN_145]\n"
+cur_query_prompt = "[UNUSED_TOKEN_146]user\n{user}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"


 def combine_history(prompt):
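These templates are consumed by the combine_history function that opens right after this hunk. Here is a hedged sketch of how the pieces plausibly fit together, using the template names defined above and assuming the demo stores past turns in st.session_state.messages as dicts with "role" and "content" keys.

def combine_history(prompt):
    # Sketch: wrap every past turn in the InternLM2 chat markup, then
    # append the pending user turn via cur_query_prompt, which ends with
    # the assistant header so the model continues as the assistant.
    total_prompt = ""
    for message in st.session_state.messages:  # assumed session layout
        if message["role"] == "user":
            total_prompt += user_prompt.format(user=message["content"])
        else:  # "robot"
            total_prompt += robot_prompt.format(robot=message["content"])
    return total_prompt + cur_query_prompt.format(user=prompt)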
Finally, the avatar images move from docs/imgs/ to assets/, and the page title follows the model upgrade.

@@ -73,10 +73,10 @@ def main():
     model, tokenizer = load_model()
     print("load model end.")

-    user_avator = "docs/imgs/user.png"
-    robot_avator = "docs/imgs/robot.png"
+    user_avator = "assets/user.png"
+    robot_avator = "assets/robot.png"

-    st.title("InternLM-Chat-7B")
+    st.title("InternLM2-Chat-7B")

     generation_config = prepare_generation_config()
