From a6bf54739c61ac230e94f95ff209004221efeb86 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 24 Mar 2023 19:56:45 -0300
Subject: [PATCH] Revert models.py (accident)

---
 modules/models.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/models.py b/modules/models.py
index c9f03588..ccb97da3 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -44,7 +44,7 @@ def load_model(model_name):
     shared.is_RWKV = model_name.lower().startswith('rwkv-')
 
     # Default settings
-    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.wbits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
+    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.gptq_bits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -95,7 +95,7 @@ def load_model(model_name):
         return model, tokenizer
 
     # Quantized model
-    elif shared.args.wbits > 0:
+    elif shared.args.gptq_bits > 0:
         from modules.GPTQ_loader import load_quantized
 
         model = load_quantized(model_name)
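
Note (editor's illustration, not part of the patch): the rename above restores
the `gptq_bits` flag that gates the quantized code path in `load_model()`.
Below is a minimal runnable Python sketch of that dispatch, with hypothetical
stub loaders standing in for the real Transformers and GPTQ calls:

import argparse

def load_model(model_name, args):
    # Quantized path: taken when --gptq-bits is set, mirroring
    # `elif shared.args.gptq_bits > 0:` in modules/models.py.
    if args.gptq_bits > 0:
        return f"load_quantized({model_name!r}, bits={args.gptq_bits})"
    # Default path, mirroring the `if not any([...])` branch
    # that falls through to a plain from_pretrained() load.
    return f"AutoModelForCausalLM.from_pretrained('models/{model_name}')"

parser = argparse.ArgumentParser()
parser.add_argument('--gptq-bits', type=int, default=0)

print(load_model('llama-13b', parser.parse_args(['--gptq-bits', '4'])))  # quantized path
print(load_model('llama-13b', parser.parse_args([])))                    # default path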