diff --git a/README_zh-CN.md b/README_zh-CN.md
index 0b24a7f..ef34683 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -130,7 +130,12 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load the model in float16; otherwise the model may run out of GPU memory depending on your hardware.
-model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True, torch_dtype=torch.float16).cuda()
+model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", device_map="auto", trust_remote_code=True, torch_dtype=torch.float16)
+# (Optional) On low-resource devices, you can load a 4-bit or 8-bit quantized model via bitsandbytes to further save GPU memory.
+# InternLM 7B in 4-bit takes roughly 8GB of GPU memory.
+# pip install -U bitsandbytes
+# 8-bit: model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", device_map="auto", trust_remote_code=True, load_in_8bit=True)
+# 4-bit: model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", device_map="auto", trust_remote_code=True, load_in_4bit=True)
 model = model.eval()
 response, history = model.chat(tokenizer, "你好", history=[])
 print(response)
@@ -149,6 +154,11 @@ from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
 model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm2-chat-7b')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, device_map="auto", trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16)
+# (Optional) On low-resource devices, you can load a 4-bit or 8-bit quantized model via bitsandbytes to further save GPU memory.
+# InternLM 7B in 4-bit takes roughly 8GB of GPU memory.
+# pip install -U bitsandbytes
+# 8-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_8bit=True)
+# 4-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_4bit=True)
 model = model.eval()
 response, history = model.chat(tokenizer, "hello", history=[])
 print(response)
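
Both hunks describe the quantized loading path only in comments. As a minimal end-to-end sketch of that path, the snippet below loads InternLM2-Chat-7B in 4-bit via the `BitsAndBytesConfig` API (the recommended equivalent of the `load_in_4bit=True` shorthand used in the diff); it assumes a transformers version that supports `quantization_config` (4.30 or later), bitsandbytes installed, and a CUDA-capable GPU:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# 4-bit quantization config; compute in float16 as in the README examples.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "internlm/internlm2-chat-7b",
    device_map="auto",            # place layers on available devices automatically
    trust_remote_code=True,       # needed for InternLM's custom model/chat code
    quantization_config=quant_config,
)
model = model.eval()

# `chat` is provided by InternLM's remote code, as in the README examples.
response, history = model.chat(tokenizer, "hello", history=[])
print(response)
```

Note that `torch_dtype` is intentionally dropped here: the quantization config governs the weight format, and `bnb_4bit_compute_dtype` controls the compute precision.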