From eddd0164495c728066ff645b67f670cc39fc4965 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 21 Apr 2023 12:41:27 -0300
Subject: [PATCH] Minor deletion

---
 modules/shared.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/modules/shared.py b/modules/shared.py
index d6cdc0bc..a08f134f 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -123,7 +123,6 @@ parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized m
 parser.add_argument('--model_type', type=str, help='Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
 parser.add_argument('--groupsize', type=int, default=-1, help='Group size.')
 parser.add_argument('--pre_layer', type=int, default=0, help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
-parser.add_argument('--file-path', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
 parser.add_argument('--monkey-patch', action='store_true', help='Apply the monkey patch for using LoRAs with quantized models.')
 parser.add_argument('--no-quant_attn', action='store_true', help='(triton) Disable quant attention. If you encounter incoherent results try disabling this.')
 parser.add_argument('--no-warmup_autotune', action='store_true', help='(triton) Disable warmup autotune.')
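
Note on the deletion: the removed '--file-path' argument reused the help string of '--wbits' verbatim and was declared with type=int, which reads as a stray copy of that line rather than a working flag; the patch itself offers no rationale beyond "Minor deletion", so that is an inference. For context, here is a minimal, runnable sketch of the argparse pattern this hunk edits, reduced to the nearby quantization flags (the standalone ArgumentParser() construction is an assumption; the real modules/shared.py configures its parser with more options):

import argparse

# Minimal sketch, assuming a bare parser; modules/shared.py builds a larger one.
parser = argparse.ArgumentParser()
parser.add_argument('--wbits', type=int, default=0,
                    help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
parser.add_argument('--groupsize', type=int, default=-1, help='Group size.')
parser.add_argument('--pre_layer', type=int, default=0,
                    help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')

# Example invocation: argparse maps each flag to an attribute on the namespace,
# so removing an add_argument line simply drops the corresponding attribute.
args = parser.parse_args(['--wbits', '4', '--groupsize', '128'])
print(args.wbits, args.groupsize)  # -> 4 128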