Mirror of https://github.com/oobabooga/text-generation-webui.git
Synced 2024-10-01 01:26:03 -04:00
Revert GPTQ_loader.py (accident)

parent a6bf54739c
commit 9fa47c0eed
@@ -14,7 +14,7 @@ import opt
 
 
 def load_quantized(model_name):
-    if not shared.args.model_type:
+    if not shared.args.gptq_model_type:
         # Try to determine model type from model name
         model_type = model_name.split('-')[0].lower()
         if model_type not in ('llama', 'opt'):
@@ -22,10 +22,10 @@ def load_quantized(model_name):
                   "argument")
             exit()
     else:
-        model_type = shared.args.model_type.lower()
+        model_type = shared.args.gptq_model_type.lower()
 
     if model_type == 'llama':
-        if not shared.args.pre_layer:
+        if not shared.args.gptq_pre_layer:
             load_quant = llama.load_quant
         else:
             load_quant = llama_inference_offload.load_quant
@@ -37,15 +37,15 @@ def load_quantized(model_name):
 
     path_to_model = Path(f'models/{model_name}')
     if path_to_model.name.lower().startswith('llama-7b'):
-        pt_model = f'llama-7b-{shared.args.wbits}bit.pt'
+        pt_model = f'llama-7b-{shared.args.gptq_bits}bit.pt'
     elif path_to_model.name.lower().startswith('llama-13b'):
-        pt_model = f'llama-13b-{shared.args.wbits}bit.pt'
+        pt_model = f'llama-13b-{shared.args.gptq_bits}bit.pt'
     elif path_to_model.name.lower().startswith('llama-30b'):
-        pt_model = f'llama-30b-{shared.args.wbits}bit.pt'
+        pt_model = f'llama-30b-{shared.args.gptq_bits}bit.pt'
     elif path_to_model.name.lower().startswith('llama-65b'):
-        pt_model = f'llama-65b-{shared.args.wbits}bit.pt'
+        pt_model = f'llama-65b-{shared.args.gptq_bits}bit.pt'
     else:
-        pt_model = f'{model_name}-{shared.args.wbits}bit.pt'
+        pt_model = f'{model_name}-{shared.args.gptq_bits}bit.pt'
 
     # Try to find the .pt both in models/ and in the subfolder
     pt_path = None
@@ -58,10 +58,10 @@ def load_quantized(model_name):
         exit()
 
     # qwopqwop200's offload
-    if shared.args.pre_layer:
-        model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.pre_layer)
+    if shared.args.gptq_pre_layer:
+        model = load_quant(str(path_to_model), str(pt_path), shared.args.gptq_bits, shared.args.gptq_pre_layer)
     else:
-        model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits)
+        model = load_quant(str(path_to_model), str(pt_path), shared.args.gptq_bits)
 
     # accelerate offload (doesn't work properly)
     if shared.args.gpu_memory:
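For context, the attributes this file reads from shared.args (gptq_model_type, gptq_bits, gptq_pre_layer) correspond to command-line flags defined elsewhere in the project. Below is a minimal, self-contained sketch of argparse definitions consistent with those attribute names; the flag spellings follow argparse's dash-to-underscore convention, but the defaults and help text are assumptions rather than copies of the project's shared.py.

# Sketch only: argparse flags matching the shared.args attributes restored in
# this revert. Flag names are inferred from the attribute names; defaults and
# help text are assumptions, not taken from the project's shared.py.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--gptq-bits', type=int, default=0, help='Number of bits of the pre-quantized model, e.g. 4.')
parser.add_argument('--gptq-model-type', type=str, help="Type of the pre-quantized model: 'llama' or 'opt'.")
parser.add_argument('--gptq-pre-layer', type=int, default=0, help='Number of layers to keep on the GPU when offloading.')

# argparse exposes dashed flags as underscored attributes, which is how
# GPTQ_loader.py reads them via shared.args:
args = parser.parse_args(['--gptq-bits', '4', '--gptq-model-type', 'llama'])
print(args.gptq_bits, args.gptq_model_type, args.gptq_pre_layer)  # -> 4 llama 0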