From e6c631aea4dd4596606b0f058173de223909d372 Mon Sep 17 00:00:00 2001
From: draff
Date: Fri, 10 Mar 2023 21:36:45 +0000
Subject: [PATCH] Replace --load-in-4bit with --llama-bits

Replaces --load-in-4bit with a more flexible --llama-bits argument that
also allows loading 2-bit and 3-bit models. This commit also fixes a
loading issue with .pt files that are not in the root of the models
folder.
---
 README.md         |  2 +-
 modules/models.py | 17 +++++++++--------
 modules/shared.py |  2 +-
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index c329913d..5c560172 100644
--- a/README.md
+++ b/README.md
@@ -138,7 +138,7 @@ Optionally, you can use the following command-line flags:
 | `--cai-chat` | Launch the web UI in chat mode with a style similar to Character.AI's. If the file `img_bot.png` or `img_bot.jpg` exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, `img_me.png` or `img_me.jpg` will be used as your profile picture. |
 | `--cpu` | Use the CPU to generate text.|
 | `--load-in-8bit` | Load the model with 8-bit precision.|
-| `--load-in-4bit` | Load the model with 4-bit precision. Currently only works with LLaMA. |
+| `--llama-bits` | Load LLaMA models with the specified precision. 2-bit, 3-bit, and 4-bit precision are supported; use the standard `--load-in-8bit` flag for 8-bit precision. |
 | `--bf16` | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
 | `--auto-devices` | Automatically split the model across the available GPU(s) and CPU.|
 | `--disk` | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk. |
diff --git a/modules/models.py b/modules/models.py
index f31d8b0d..467ffbee 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -42,7 +42,7 @@ def load_model(model_name):
     shared.is_RWKV = model_name.lower().startswith('rwkv-')
 
     # Default settings
-    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.load_in_4bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
+    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.llama_bits > 0 or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -88,23 +88,24 @@ def load_model(model_name):
         return model, tokenizer
 
     # 4-bit LLaMA
-    elif shared.args.load_in_4bit:
+    elif shared.args.llama_bits > 0:
         sys.path.insert(0, os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
+        bits = shared.args.llama_bits
 
         from llama import load_quant
 
         path_to_model = Path(f'models/{model_name}')
         pt_model = ''
         if path_to_model.name.lower().startswith('llama-7b'):
-            pt_model = 'llama-7b-4bit.pt'
+            pt_model = f'llama-7b-{bits}bit.pt'
         elif path_to_model.name.lower().startswith('llama-13b'):
-            pt_model = 'llama-13b-4bit.pt'
+            pt_model = f'llama-13b-{bits}bit.pt'
         elif path_to_model.name.lower().startswith('llama-30b'):
-            pt_model = 'llama-30b-4bit.pt'
+            pt_model = f'llama-30b-{bits}bit.pt'
         elif path_to_model.name.lower().startswith('llama-65b'):
-            pt_model = 'llama-65b-4bit.pt'
+            pt_model = f'llama-65b-{bits}bit.pt'
         else:
-            pt_model = f'{model_name}-4bit.pt'
+            pt_model = f'{model_name}-{bits}bit.pt'
 
         # Try to find the .pt both in models/ and in the subfolder
         pt_path = None
@@ -116,7 +117,7 @@ def load_model(model_name):
             print(f"Could not find {pt_model}, exiting...")
             exit()
 
-        model = load_quant(path_to_model, Path(f"models/{pt_model}"), 4)
+        model = load_quant(path_to_model, Path(f"{pt_path}"), bits)
 
         # Multi-GPU setup
         if shared.args.gpu_memory:
diff --git a/modules/shared.py b/modules/shared.py
index 2acb047f..61d5a768 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -67,7 +67,7 @@ parser.add_argument('--chat', action='store_true', help='Launch the web UI in ch
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
-parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision. Currently only works with LLaMA.')
+parser.add_argument('--llama-bits', type=int, default=0, help='Load LLaMA models with the specified precision. 2-bit, 3-bit, and 4-bit precision are supported; use the standard --load-in-8bit flag for 8-bit precision.')
 parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
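
Example usage (illustrative only, not part of the patch; it assumes the web UI's existing --model flag and a pre-quantized GPTQ checkpoint whose name matches the lookup above, e.g. models/llama-7b-4bit.pt):

    # Load LLaMA-7B with 4-bit GPTQ weights
    python server.py --model llama-7b --llama-bits 4

    # 8-bit precision is still handled by the separate --load-in-8bit flag
    python server.py --model llama-7b --load-in-8bit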