text-generation-webui/modules/GPTQ_loader.py


import re
import sys
from pathlib import Path

import accelerate
import torch

import modules.shared as shared
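# Make the local GPTQ-for-LLaMa checkout importable before loading its modules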
sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
import llama
import llama_inference_offload
import opt
def load_quantized(model_name):
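    """Load a pre-quantized (GPTQ) model from models/<model_name>.

    Model type, bit width, group size, and offloading behaviour are taken
    from the command-line flags in shared.args.
    """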
    if not shared.args.model_type:
        # Try to determine model type from model name
        if model_name.lower().startswith(('llama', 'alpaca')):
            model_type = 'llama'
        elif model_name.lower().startswith(('opt', 'galactica')):
            model_type = 'opt'
        else:
            print("Can't determine model type from model name. Please specify it manually using the --model_type argument")
            exit()
    else:
        model_type = shared.args.model_type.lower()
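    # Pick the load_quant implementation; llama with --pre_layer uses the
    # layer-offloading variant from GPTQ-for-LLaMa.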
    if model_type == 'llama':
        if not shared.args.pre_layer:
            load_quant = llama.load_quant
        else:
            load_quant = llama_inference_offload.load_quant
    elif model_type == 'opt':
        load_quant = opt.load_quant
    else:
        print("Unknown pre-quantized model type specified. Only 'llama' and 'opt' are supported")
        exit()
    # Try to locate the quantized model file (.pt or .safetensors).
    path_to_model = Path(f'models/{model_name}')
    found_pts = list(path_to_model.glob("*.pt"))
    found_safetensors = list(path_to_model.glob("*.safetensors"))
    pt_path = None

    if len(found_pts) == 1:
        pt_path = found_pts[0]
    elif len(found_safetensors) == 1:
        pt_path = found_safetensors[0]
    else:
        # No unique checkpoint was found in the model folder, so guess the
        # file name from the model size and --wbits.
        if path_to_model.name.lower().startswith('llama-7b'):
            pt_model = f'llama-7b-{shared.args.wbits}bit'
        elif path_to_model.name.lower().startswith('llama-13b'):
            pt_model = f'llama-13b-{shared.args.wbits}bit'
        elif path_to_model.name.lower().startswith('llama-30b'):
            pt_model = f'llama-30b-{shared.args.wbits}bit'
        elif path_to_model.name.lower().startswith('llama-65b'):
            pt_model = f'llama-65b-{shared.args.wbits}bit'
        else:
            pt_model = f'{model_name}-{shared.args.wbits}bit'
        # Try to find the .safetensors or .pt file both in models/ and in the model's subfolder
        for path in [Path(p + ext) for ext in ['.safetensors', '.pt'] for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]:
            if path.exists():
                print(f"Found {path}")
                pt_path = path
                break
    if not pt_path:
        print("Could not find the quantized model in .pt or .safetensors format, exiting...")
        exit()
    # qwopqwop200's offload
    if shared.args.pre_layer:
        model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, shared.args.pre_layer)
    else:
        model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize)
        # accelerate offload (doesn't work properly)
        if shared.args.gpu_memory:
            memory_map = list(map(lambda x: x.strip(), shared.args.gpu_memory))
            max_cpu_memory = shared.args.cpu_memory.strip() if shared.args.cpu_memory is not None else '99GiB'
            max_memory = {}
            # Values given without a unit suffix are interpreted as GiB
            for i in range(len(memory_map)):
                max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i]
            max_memory['cpu'] = max_cpu_memory

            device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LlamaDecoderLayer"])
            print("Using the following device map for the 4-bit model:", device_map)
            # https://huggingface.co/docs/accelerate/package_reference/big_modeling#accelerate.dispatch_model
            model = accelerate.dispatch_model(model, device_map=device_map, offload_buffers=True)
        # No offload
        elif not shared.args.cpu:
            model = model.to(torch.device('cuda:0'))

    return model
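# A minimal usage sketch (assumed, not part of this module): the webui's model
# loader calls this roughly as follows after parsing flags such as
# "--wbits 4 --groupsize 128 --model_type llama":
#
#     from modules.GPTQ_loader import load_quantized
#     model = load_quantized('llama-7b')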