diff --git a/README.md b/README.md
index d5c9d42..5b002e0 100644
--- a/README.md
+++ b/README.md
@@ -138,7 +138,12 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load the model in float16; otherwise it will be loaded as float32 and might cause an OOM error.
-model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True, torch_dtype=torch.float16).cuda()
+model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", device_map="auto", trust_remote_code=True, torch_dtype=torch.float16)
+# (Optional) On low-resource devices, you can load the model in 4-bit or 8-bit via bitsandbytes to further save GPU memory.
+ # InternLM 7B in 4-bit will take nearly 8GB of GPU memory.
+ # pip install -U bitsandbytes
+ # 8-bit: model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", device_map="auto", trust_remote_code=True, load_in_8bit=True)
+ # 4-bit: model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", device_map="auto", trust_remote_code=True, load_in_4bit=True)
 model = model.eval()
 response, history = model.chat(tokenizer, "hello", history=[])
 print(response)
@@ -158,6 +163,11 @@ model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm2-chat-7b')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, device_map="auto", trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load the model in float16; otherwise it will be loaded as float32 and might cause an OOM error.
 model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16)
+# (Optional) On low-resource devices, you can load the model in 4-bit or 8-bit via bitsandbytes to further save GPU memory.
+ # InternLM 7B in 4-bit will take nearly 8GB of GPU memory.
+ # pip install -U bitsandbytes
+ # 8-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_8bit=True)
+ # 4-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_4bit=True)
 model = model.eval()
 response, history = model.chat(tokenizer, "hello", history=[])
 print(response)
diff --git a/README_zh-CN.md b/README_zh-CN.md
index f682cd2..2892a1a 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -136,7 +136,12 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
 # Set `torch_dtype=torch.float16` to load the model in torch.float16; otherwise it may run out of GPU memory depending on your hardware.
-model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True, torch_dtype=torch.float16).cuda()
+model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", device_map="auto", trust_remote_code=True, torch_dtype=torch.float16)
+# (Optional) On low-resource devices, you can load the model in 4-bit or 8-bit via bitsandbytes to further save GPU memory.
+ # InternLM 7B in 4-bit will take nearly 8GB of GPU memory.
+ # pip install -U bitsandbytes
+ # 8-bit: model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", device_map="auto", trust_remote_code=True, load_in_8bit=True)
+ # 4-bit: model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-7b", device_map="auto", trust_remote_code=True, load_in_4bit=True)
 model = model.eval()
 response, history = model.chat(tokenizer, "你好", history=[])
 print(response)
@@ -155,6 +160,11 @@ from modelscope import snapshot_download, AutoTokenizer, AutoModelForCausalLM
 model_dir = snapshot_download('Shanghai_AI_Laboratory/internlm2-chat-7b')
 tokenizer = AutoTokenizer.from_pretrained(model_dir, device_map="auto", trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, torch_dtype=torch.float16)
+# (Optional) On low-resource devices, you can load the model in 4-bit or 8-bit via bitsandbytes to further save GPU memory.
+ # InternLM 7B in 4-bit will take nearly 8GB of GPU memory.
+ # pip install -U bitsandbytes
+ # 8-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_8bit=True)
+ # 4-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_4bit=True)
 model = model.eval()
 response, history = model.chat(tokenizer, "hello", history=[])
 print(response)
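For context, the `load_in_8bit` / `load_in_4bit` flags recommended in the added comments are shorthand that transformers expands into a bitsandbytes quantization config. Below is a minimal sketch of the same 4-bit load written out via `BitsAndBytesConfig`, for readers who want the quantization settings explicit. It assumes `bitsandbytes` and `accelerate` (which backs `device_map="auto"`) are installed; the `bnb_4bit_compute_dtype` knob is an addition for illustration, not something this diff sets.

```python
# Sketch: explicit BitsAndBytesConfig form of the 4-bit load recommended above.
# Assumes: pip install -U bitsandbytes accelerate
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "internlm/internlm2-chat-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # weights stay 4-bit; matmuls run in fp16
)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    trust_remote_code=True,
    quantization_config=quant_config,  # explicit equivalent of load_in_4bit=True
)
model = model.eval()
response, history = model.chat(tokenizer, "hello", history=[])
print(response)
```

The 8-bit path is analogous: pass `load_in_8bit=True` to `BitsAndBytesConfig` instead.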