From dcfd866402dfbbc849bd4441fd1de9448de18c75 Mon Sep 17 00:00:00 2001
From: EyeDeck
Date: Thu, 23 Mar 2023 21:31:34 -0400
Subject: [PATCH] Allow loading of .safetensors through GPTQ-for-LLaMa

---
 modules/GPTQ_loader.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 32a5458f..bec6c66f 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -37,21 +37,23 @@ def load_quantized(model_name):
 
     path_to_model = Path(f'models/{model_name}')
     if path_to_model.name.lower().startswith('llama-7b'):
-        pt_model = f'llama-7b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-7b-{shared.args.gptq_bits}bit'
     elif path_to_model.name.lower().startswith('llama-13b'):
-        pt_model = f'llama-13b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-13b-{shared.args.gptq_bits}bit'
     elif path_to_model.name.lower().startswith('llama-30b'):
-        pt_model = f'llama-30b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-30b-{shared.args.gptq_bits}bit'
     elif path_to_model.name.lower().startswith('llama-65b'):
-        pt_model = f'llama-65b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-65b-{shared.args.gptq_bits}bit'
     else:
-        pt_model = f'{model_name}-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'{model_name}-{shared.args.gptq_bits}bit'
 
-    # Try to find the .pt both in models/ and in the subfolder
+    # Try to find the .safetensors or .pt both in models/ and in the subfolder
     pt_path = None
-    for path in [Path(p) for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]:
+    for path in [Path(p+ext) for ext in ['.safetensors', '.pt'] for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]:
         if path.exists():
+            print(f"Found {path}")
             pt_path = path
+            break
 
     if not pt_path:
         print(f"Could not find {pt_model}, exiting...")
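
Note (not part of the patch): the nested comprehension in the new for-loop checks extensions in the outer position and directories in the inner position, so the effective search order is models/<name>.safetensors, <subfolder>/<name>.safetensors, models/<name>.pt, <subfolder>/<name>.pt. A minimal sketch of that same order written as explicit loops is below; the helper name find_quantized_checkpoint is hypothetical and used only for illustration.

    from pathlib import Path

    def find_quantized_checkpoint(pt_model, path_to_model):
        # Sketch of the search order produced by the comprehension in the patch:
        # .safetensors is preferred over .pt, and for each extension models/
        # is checked before the model's own subfolder.
        for ext in ['.safetensors', '.pt']:
            for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]:
                path = Path(p + ext)
                if path.exists():
                    print(f"Found {path}")
                    return path
        return None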