Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-10-01 01:26:03 -04:00)
Rename --llama-bits to --gptq-bits
Commit: 65dda28c9d
Parent: fed3617f07
@@ -42,7 +42,7 @@ def load_model(model_name):
     shared.is_RWKV = model_name.lower().startswith('rwkv-')
 
     # Default settings
-    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.load_in_4bit, shared.args.llama_bits > 0, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
+    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.load_in_4bit, shared.args.gptq_bits > 0, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
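Note that the renamed attribute also appears in this "no special flags" gate: the default from_pretrained path runs only when every entry is falsy, so gptq_bits > 0 steers loading away from it. A minimal sketch of the gate's behavior, with hypothetical plain values standing in for shared.args:

    # Hypothetical stand-ins for the shared.args fields (illustrative only)
    flags = {'cpu': False, 'load_in_8bit': False, 'load_in_4bit': False,
             'gptq_bits': 4, 'deepspeed': False, 'flexgen': False}

    # gptq_bits > 0 makes any() true, so the default loading branch is skipped
    use_default_path = not any([flags['cpu'], flags['load_in_8bit'],
                                flags['load_in_4bit'], flags['gptq_bits'] > 0,
                                flags['deepspeed'], flags['flexgen']])
    print(use_default_path)  # -> False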
@@ -88,7 +88,7 @@ def load_model(model_name):
         return model, tokenizer
 
     # 4-bit LLaMA
-    elif shared.args.llama_bits > 0 or shared.args.load_in_4bit:
+    elif shared.args.gptq_bits > 0 or shared.args.load_in_4bit:
         from modules.quantized_LLaMA import load_quantized_LLaMA
 
         model = load_quantized_LLaMA(model_name)
@@ -16,7 +16,7 @@ def load_quantized_LLaMA(model_name):
     if shared.args.load_in_4bit:
         bits = 4
     else:
-        bits = shared.args.llama_bits
+        bits = shared.args.gptq_bits
 
     path_to_model = Path(f'models/{model_name}')
     pt_model = ''
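The hunk above preserves the existing precedence: --load-in-4bit forces 4 bits, and --gptq-bits is consulted only otherwise (0 meaning "not set"). A minimal sketch of that selection, with resolve_bits as a hypothetical standalone helper:

    def resolve_bits(load_in_4bit, gptq_bits):
        # --load-in-4bit takes precedence; otherwise fall back to --gptq-bits
        return 4 if load_in_4bit else gptq_bits

    assert resolve_bits(True, 8) == 4   # explicit 4-bit flag wins
    assert resolve_bits(False, 3) == 3  # GPTQ precision used as given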
@@ -68,7 +68,7 @@ parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI i
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
 parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision. Currently only works with LLaMA.')
-parser.add_argument('--llama-bits', type=int, default=0, help='Load a pre-quantized model with specified precision. 2, 3, 4 and 8bit are supported. Currently only works with LLaMA.')
+parser.add_argument('--gptq-bits', type=int, default=0, help='Load a pre-quantized model with specified precision. 2, 3, 4 and 8bit are supported. Currently only works with LLaMA.')
 parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
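At the Python level the rename is mechanical: argparse converts dashes to underscores, so --gptq-bits surfaces as shared.args.gptq_bits, exactly mirroring the old llama_bits attribute. A small self-contained sketch of that mapping, with the parser reduced to just this one flag:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--gptq-bits', type=int, default=0,
                        help='Load a pre-quantized model with specified precision. '
                             '2, 3, 4 and 8bit are supported.')

    # argparse replaces dashes with underscores in attribute names,
    # which is why call sites read args.gptq_bits rather than 'gptq-bits'.
    args = parser.parse_args(['--gptq-bits', '4'])
    print(args.gptq_bits)  # -> 4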