diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..f43052c
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,8 @@
+FROM pytorch/pytorch:1.13.1-cuda11.6-cudnn8-runtime
+COPY requirements.txt .
+RUN pip3 install -r requirements.txt
+COPY . .
+ENV model_path="/model"
+EXPOSE 7860
+
+CMD [ "python3","web_demo.py" ]
\ No newline at end of file
diff --git a/README.md b/README.md
index c63913c..6091dc9 100644
--- a/README.md
+++ b/README.md
@@ -133,6 +133,12 @@ curl -X POST "http://127.0.0.1:8000" \
 }
 ```
 
+### 在docker容器中部署
+```bash
+docker build -t chatglm .
+docker run --gpus all -v /the/model/path:/model -p 7860:7860 chatglm
+```
+
 ## 低成本部署
 ### 模型量化
 默认情况下,模型以 FP16 精度加载,运行上述代码需要大概 13GB 显存。如果你的 GPU 显存有限,可以尝试以量化方式加载模型,使用方法如下:
@@ -156,8 +162,6 @@ model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=Tru
 model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4-qe", trust_remote_code=True).half().cuda()
 ```
 
-
-
 ### CPU 部署
 如果你没有 GPU 硬件的话,也可以在 CPU 上进行推理,但是推理速度会更慢。使用方法如下(需要大概 32GB 内存)
 ```python
diff --git a/web_demo.py b/web_demo.py
index 88a6dc8..9d4a25a 100644
--- a/web_demo.py
+++ b/web_demo.py
@@ -1,10 +1,14 @@
 from transformers import AutoModel, AutoTokenizer
 import gradio as gr
-
-tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+import os
+modelPath = os.getenv('model_path')
+modelPath = modelPath if modelPath else "THUDM/chatglm-6b"
+tokenizer = AutoTokenizer.from_pretrained(modelPath, trust_remote_code=True)
+model = AutoModel.from_pretrained(modelPath, trust_remote_code=True).half().cuda()
 model = model.eval()
+
+
 
 MAX_TURNS = 20
 MAX_BOXES = MAX_TURNS * 2
 
@@ -42,4 +46,4 @@ with gr.Blocks() as demo:
         temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
         button = gr.Button("Generate")
     button.click(predict, [txt, max_length, top_p, temperature, state], [state] + text_boxes)
-demo.queue().launch(share=False, inbrowser=True)
+demo.queue().launch(share=False, inbrowser=True,server_name="0.0.0.0")