From 265ba384b7e5e928d97d2749b25771b7d3d93fde Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Tue, 14 Mar 2023 07:56:31 -0300
Subject: [PATCH] Rename a file, add deprecation warning for --load-in-4bit

---
 modules/{quant_loader.py => GPTQ_loader.py} | 0
 modules/models.py                           | 2 +-
 modules/shared.py                           | 6 ++++++
 3 files changed, 7 insertions(+), 1 deletion(-)
 rename modules/{quant_loader.py => GPTQ_loader.py} (100%)

diff --git a/modules/quant_loader.py b/modules/GPTQ_loader.py
similarity index 100%
rename from modules/quant_loader.py
rename to modules/GPTQ_loader.py
diff --git a/modules/models.py b/modules/models.py
index 46cd77ff..f4bb11fd 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -89,7 +89,7 @@ def load_model(model_name):
 
     # Quantized model
     elif shared.args.gptq_bits > 0:
-        from modules.quant_loader import load_quantized
+        from modules.GPTQ_loader import load_quantized
 
         model = load_quantized(model_name)
 
diff --git a/modules/shared.py b/modules/shared.py
index 3abdc551..ea2eb50b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -69,6 +69,7 @@ parser.add_argument('--chat', action='store_true', help='Launch the web UI in ch
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
+parser.add_argument('--load-in-4bit', action='store_true', help='DEPRECATED: use --gptq-bits 4 instead.')
 parser.add_argument('--gptq-bits', type=int, default=0, help='Load a pre-quantized model with specified precision. 2, 3, 4 and 8bit are supported. Currently only works with LLaMA and OPT.')
 parser.add_argument('--gptq-model-type', type=str, help='Model type of pre-quantized model. Currently only LLaMa and OPT are supported.')
 parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
@@ -95,3 +96,8 @@ parser.add_argument('--share', action='store_true', help='Create a public URL. T
 parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch.')
 parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
 args = parser.parse_args()
+
+# Provisional, this will be deleted later
+if args.load_in_4bit:
+    print("Warning: --load-in-4bit is deprecated and will be removed. Use --gptq-bits 4 instead.\n")
+    args.gptq_bits = 4
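
Note for reviewers: below is a minimal, self-contained sketch of the deprecation shim this patch adds to modules/shared.py, trimmed to the two relevant flags (the real parser defines many more arguments). It is an illustration of the pattern, not part of the patch itself.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--load-in-4bit', action='store_true', help='DEPRECATED: use --gptq-bits 4 instead.')
parser.add_argument('--gptq-bits', type=int, default=0, help='Load a pre-quantized model with specified precision.')
args = parser.parse_args()

# Provisional shim: map the deprecated flag onto the new one, so downstream
# code (e.g. the shared.args.gptq_bits > 0 check in modules/models.py)
# only ever has to look at args.gptq_bits.
if args.load_in_4bit:
    print("Warning: --load-in-4bit is deprecated and will be removed. Use --gptq-bits 4 instead.\n")
    args.gptq_bits = 4

Invoking a script with this shim using --load-in-4bit prints the warning and then continues with args.gptq_bits == 4, i.e. the same GPTQ code path as passing --gptq-bits 4 directly.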