From bcb851482059a8cb456931c51a4254e9a9908a65 Mon Sep 17 00:00:00 2001
From: zxgov <64576649+zx2021@users.noreply.github.com>
Date: Fri, 30 Jun 2023 10:27:48 +0800
Subject: [PATCH] Update web_demo2.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Load the model in a more modern way.
---
 web_demo2.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/web_demo2.py b/web_demo2.py
index 6c66308..6b641fc 100644
--- a/web_demo2.py
+++ b/web_demo2.py
@@ -13,7 +13,7 @@ st.set_page_config(
 @st.cache_resource
 def get_model():
     tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
-    model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).cuda()
+    model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True, device='cuda')  # .cuda()
     # Multi-GPU support: use the two lines below instead of the line above, and set num_gpus to your actual number of GPUs
     # from utils import load_model_on_gpus
     # model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2)
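
Reviewer note: the sketch below shows the patched get_model() as a standalone reference; it is not part of the patch. The model.eval() call and the return statement are assumed from the surrounding web_demo2.py and are not visible in the hunk above, and load_model_on_gpus is the multi-GPU helper referenced in the comments, shipped in the repository's utils.py.

    import streamlit as st
    from transformers import AutoModel, AutoTokenizer

    @st.cache_resource
    def get_model():
        tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
        # Patched loading style: pass device='cuda' to from_pretrained instead of
        # calling .cuda() on the returned model.
        model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True, device='cuda')
        # Multi-GPU alternative from the comments in the diff
        # (set num_gpus to the number of GPUs actually available):
        # from utils import load_model_on_gpus
        # model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2)
        model = model.eval()  # assumed from the surrounding file, not shown in the hunk
        return tokenizer, model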