mirror of https://github.com/THUDM/ChatGLM2-6B
add Dockerfile support
parent 53f0106817
commit 5c7bd51840
Dockerfile (new file)
@@ -0,0 +1,17 @@
+## use pytorch images
+FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
+## copy all files
+COPY . .
+## install tools
+RUN apt update && apt install -y git gcc
+## install requirements and cudatoolkit
+RUN pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple/ && \
+    pip install icetk -i https://pypi.tuna.tsinghua.edu.cn/simple/ && \
+    conda install cudatoolkit=11.7 -c nvidia
+## expose port
+EXPOSE 7860
+## run
+CMD [ "python3","web_demo.py" ]
+
+## command for docker run
+## docker run --rm -it -v /path/to/chatglm2-6b-int4:/workspace/THUDM/chatglm2-6b --gpus=all -e NVIDIA_DRIVER_CAPABILITIES=compute,utility -e NVIDIA_VISIBLE_DEVICES=all -p 7860:7860 chatglm2:v1 python3 web_demo.py
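For reference, a minimal build-and-run sketch derived from the Dockerfile and the run command above. The chatglm2:v1 tag and the /path/to/chatglm2-6b-int4 weights directory are the placeholders used in that command, and --gpus=all requires the NVIDIA Container Toolkit on the host:

# build the image from the repository root
docker build -t chatglm2:v1 .

# start the web demo, mounting the downloaded weights and publishing the Gradio port
docker run --rm -it \
  -v /path/to/chatglm2-6b-int4:/workspace/THUDM/chatglm2-6b \
  --gpus=all \
  -e NVIDIA_DRIVER_CAPABILITIES=compute,utility -e NVIDIA_VISIBLE_DEVICES=all \
  -p 7860:7860 \
  chatglm2:v1 python3 web_demo.py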
web_demo.py
@@ -105,4 +105,4 @@ with gr.Blocks() as demo:
     emptyBtn.click(reset_state, outputs=[chatbot, history, past_key_values], show_progress=True)

-demo.queue().launch(share=False, inbrowser=True)
+demo.queue().launch(share=False, inbrowser=True, server_name="0.0.0.0")
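Adding server_name="0.0.0.0" makes Gradio bind to all interfaces instead of its default 127.0.0.1 (loopback only), which is what lets -p 7860:7860 reach the demo from outside the container. A quick check from the host, assuming the container was started with the run command above:

# the published port should now answer with an HTTP 200
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:7860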