From f8e973341221612f020f69c0b1a0cdb373030621 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 28 Sep 2023 19:32:35 -0700
Subject: [PATCH] Minor syntax change

---
 modules/models_settings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/models_settings.py b/modules/models_settings.py
index b3611a94..c18a3a82 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -79,7 +79,7 @@ def infer_loader(model_name, model_settings):
     path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
     if not path_to_model.exists():
         loader = None
-    elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
+    elif (path_to_model / 'quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
         loader = 'AutoGPTQ'
     elif len(list(path_to_model.glob('*.gguf'))) > 0:
         loader = 'llama.cpp'
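
Note (not part of the patch): a minimal sketch of why the one-line change is equivalent. Joining with pathlib's `/` operator on the already-built path_to_model produces the same path as rebuilding it from an f-string, which is what the patch relies on. The model_dir and model_name values below are hypothetical stand-ins for shared.args.model_dir and the model folder name.

    # Illustrative only; values are hypothetical.
    from pathlib import Path

    model_dir = 'models'              # stand-in for shared.args.model_dir
    model_name = 'some-gptq-model'    # stand-in for the model folder name

    path_to_model = Path(f'{model_dir}/{model_name}')

    old_style = Path(f'{model_dir}/{model_name}/quantize_config.json')
    new_style = path_to_model / 'quantize_config.json'

    # Both refer to models/some-gptq-model/quantize_config.json
    assert old_style == new_style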