Change warmup_autotune to a negative switch.

Light 2023-04-13 20:59:49 +08:00
parent 15d5a043f2
commit cf58058c33
2 changed files with 2 additions and 2 deletions

View File

@@ -66,7 +66,7 @@ def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exc
from quant import autotune_warmup, make_quant_attn
# triton branch
make_quant_attn(model)
-if shared.args.warmup_autotune:
+if not shared.args.no_warmup_autotune:
autotune_warmup(model)
except ImportError: # not triton branch
pass
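The check is inverted to match the new flag: the attribute defaults to False when the switch is absent, so Triton warmup still runs by default. Below is a minimal standalone sketch of the new gating logic; the SimpleNamespace and the dummy autotune_warmup are stand-ins for the real shared.args and the helper from quant, not code from the repository.

from types import SimpleNamespace

# Stand-ins for illustration only; shared.args and autotune_warmup
# come from the actual code base.
def autotune_warmup(model):
    print('running Triton autotune warmup')

model = None
shared = SimpleNamespace(args=SimpleNamespace(no_warmup_autotune=False))

# Flag absent: the attribute is False, so warmup runs as before.
if not shared.args.no_warmup_autotune:
    autotune_warmup(model)

# --no-warmup_autotune passed: the attribute is True and warmup is skipped.
shared.args.no_warmup_autotune = True
if not shared.args.no_warmup_autotune:
    autotune_warmup(model)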

View File

@@ -117,7 +117,7 @@ parser.add_argument('--wbits', type=int, default=0, help='GPTQ: Load a pre-quant
parser.add_argument('--model_type', type=str, help='GPTQ: Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
parser.add_argument('--groupsize', type=int, default=-1, help='GPTQ: Group size.')
parser.add_argument('--pre_layer', type=int, default=0, help='GPTQ: The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
-parser.add_argument('--warmup_autotune', action=argparse.BooleanOptionalAction, default=True, help='GPTQ: Enable warmup autotune. Only usable for triton.')
+parser.add_argument('--no-warmup_autotune', action='store_true', help='GPTQ: Disable warmup autotune for triton.')
# FlexGen
parser.add_argument('--flexgen', action='store_true', help='Enable the use of FlexGen offloading.')
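For comparison, the two argparse styles behave as sketched below. BooleanOptionalAction (added in Python 3.9) auto-generates both --warmup_autotune and --no-warmup_autotune and stores the value under warmup_autotune, while the replacement exposes only the negative switch, which may be part of the motivation for the change. This is a standalone sketch, not code from the repository.

import argparse

# Old behaviour: BooleanOptionalAction (Python 3.9+) creates both the
# positive and negative options, defaulting to True.
old = argparse.ArgumentParser()
old.add_argument('--warmup_autotune', action=argparse.BooleanOptionalAction, default=True)
print(old.parse_args([]))                        # Namespace(warmup_autotune=True)
print(old.parse_args(['--no-warmup_autotune']))  # Namespace(warmup_autotune=False)

# New behaviour: a single negative switch; warmup stays on unless the
# user explicitly passes --no-warmup_autotune.
new = argparse.ArgumentParser()
new.add_argument('--no-warmup_autotune', action='store_true')
print(new.parse_args([]))                        # Namespace(no_warmup_autotune=False)
print(new.parse_args(['--no-warmup_autotune']))  # Namespace(no_warmup_autotune=True)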