From 828a524f9a957f56c1985d71f941715727fd1db4 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 9 Mar 2023 15:50:26 -0300
Subject: [PATCH] Add LLaMA 4-bit support

---
 modules/models.py | 22 +++++++++++++++++++++-
 modules/shared.py |  1 +
 requirements.txt  |  2 +-
 3 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/modules/models.py b/modules/models.py
index 16ce6eb1..04235b52 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -1,5 +1,6 @@
 import json
 import os
+import sys
 import time
 import zipfile
 from pathlib import Path
@@ -41,7 +42,7 @@ def load_model(model_name):
     shared.is_RWKV = model_name.lower().startswith('rwkv-')
 
     # Default settings
-    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
+    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.load_in_4bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -86,6 +87,24 @@ def load_model(model_name):
 
         return model, tokenizer
 
+    # 4-bit LLaMA
+    elif shared.args.load_in_4bit:
+        sys.path.append(os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
+
+        from llama import load_quant
+
+        path_to_model = Path(f'models/{model_name}')
+        pt_model = ''
+        if path_to_model.name.lower().startswith('llama-7b'):
+            pt_model = 'llama-7b-4bit.pt'
+        if path_to_model.name.lower().startswith('llama-13b'):
+            pt_model = 'llama-13b-4bit.pt'
+        if path_to_model.name.lower().startswith('llama-30b'):
+            pt_model = 'llama-30b-4bit.pt'
+
+        model = load_quant(path_to_model, Path(f"models/{pt_model}"), 4)
+        model = model.to(torch.device('cuda:0'))
+
     # Custom
     else:
         command = "AutoModelForCausalLM.from_pretrained"
@@ -159,3 +178,4 @@ def load_soft_prompt(name):
     shared.soft_prompt_tensor = tensor
 
     return name
+
diff --git a/modules/shared.py b/modules/shared.py
index b609045c..4c062fe9 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -68,6 +68,7 @@ parser.add_argument('--chat', action='store_true', help='Launch the web UI in ch
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
+parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision. Currently only works with LLaMA.')
 parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
diff --git a/requirements.txt b/requirements.txt
index 47c56a45..6133f394 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,4 +6,4 @@ numpy
 rwkv==0.1.0
 safetensors==0.2.8
 sentencepiece
-git+https://github.com/oobabooga/transformers@llama_push
+git+https://github.com/zphang/transformers@llama_push
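
Usage note (not part of the patch): the new branch expects the GPTQ-for-LLaMa repository to be cloned into repositories/ and a pre-quantized checkpoint to sit next to the Hugging Face model folder, e.g. models/llama-7b/ plus models/llama-7b-4bit.pt. Below is a minimal standalone sketch of the same load path; the model and checkpoint names are illustrative assumptions, and load_quant is whatever GPTQ-for-LLaMa's llama.py exports, exactly as imported in the patch.

    # Sketch only: mirrors the patch's 4-bit branch outside of load_model().
    # "models/llama-7b" and "models/llama-7b-4bit.pt" are example paths, not shipped with the patch.
    import os
    import sys
    from pathlib import Path

    import torch

    # Make the cloned GPTQ-for-LLaMa repo importable, as the patch does.
    sys.path.append(os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
    from llama import load_quant  # provided by GPTQ-for-LLaMa

    # Third argument is the bit width (4), matching the call in the patch.
    model = load_quant(Path("models/llama-7b"), Path("models/llama-7b-4bit.pt"), 4)
    model = model.to(torch.device('cuda:0'))

In the web UI itself, this path is taken when server.py is started with --load-in-4bit and the selected model folder name starts with llama-7b, llama-13b, or llama-30b.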