From cf58058c3380df057015239cce0000ef33b2b7b0 Mon Sep 17 00:00:00 2001
From: Light
Date: Thu, 13 Apr 2023 20:59:49 +0800
Subject: [PATCH] Change warmup_autotune to a negative switch.

---
 modules/GPTQ_loader.py | 2 +-
 modules/shared.py      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 1cd3e5cd..0329c8ba 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -66,7 +66,7 @@ def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exc
     try:
         from quant import autotune_warmup, make_quant_attn # triton branch
         make_quant_attn(model)
-        if shared.args.warmup_autotune:
+        if not shared.args.no_warmup_autotune:
             autotune_warmup(model)
     except ImportError: # not triton branch
         pass
diff --git a/modules/shared.py b/modules/shared.py
index 563d52bb..41ca3132 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -117,7 +117,7 @@ parser.add_argument('--wbits', type=int, default=0, help='GPTQ: Load a pre-quant
 parser.add_argument('--model_type', type=str, help='GPTQ: Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
 parser.add_argument('--groupsize', type=int, default=-1, help='GPTQ: Group size.')
 parser.add_argument('--pre_layer', type=int, default=0, help='GPTQ: The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
-parser.add_argument('--warmup_autotune', action=argparse.BooleanOptionalAction, default=True, help='GPTQ: Enable warmup autotune. Only usable for triton.')
+parser.add_argument('--no-warmup_autotune', action='store_true', help='GPTQ: Disable warmup autotune for triton.')

 # FlexGen
 parser.add_argument('--flexgen', action='store_true', help='Enable the use of FlexGen offloading.')
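
Note on the new flag (illustration only, not part of the patch): with action='store_true' the option defaults to False, so warmup autotune stays enabled unless the user passes --no-warmup_autotune. argparse also maps the dashed option name to an underscored attribute, which is why GPTQ_loader.py can test shared.args.no_warmup_autotune. A minimal standalone sketch of that behaviour:

    # Sketch of the negative switch introduced by this patch; the parser here
    # is a stand-in, not the real modules/shared.py parser.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--no-warmup_autotune', action='store_true',
                        help='GPTQ: Disable warmup autotune for triton.')

    args = parser.parse_args([])                        # flag absent (default)
    assert args.no_warmup_autotune is False             # warmup autotune runs

    args = parser.parse_args(['--no-warmup_autotune'])  # flag given
    assert args.no_warmup_autotune is True              # warmup autotune skipped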