Fix loading sharded GGUF models through llamacpp_HF

oobabooga 2024-04-11 14:50:05 -07:00
parent fd4e46bce2
commit e158299fb4


@@ -192,7 +192,7 @@ class LlamacppHF(PreTrainedModel):
         if path.is_file():
             model_file = path
         else:
-            model_file = list(path.glob('*.gguf'))[0]
+            model_file = sorted(path.glob('*.gguf'))[0]
         logger.info(f"llama.cpp weights detected: {model_file}\n")
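For context, here is a minimal sketch of why the sort matters (the directory name and shard filenames below are hypothetical, not from the commit): `Path.glob()` yields files in arbitrary filesystem order, so for a GGUF model split into shards the first shard must be selected explicitly by sorting the names; llama.cpp then picks up the remaining shards from that entry point.

```python
from pathlib import Path

# Hypothetical sharded GGUF layout inside the model directory, e.g.:
#   model-00001-of-00003.gguf, model-00002-of-00003.gguf, ...
path = Path('models/my-sharded-model')  # assumed path for illustration

if path.is_file():
    model_file = path
else:
    # glob() returns files in no guaranteed order; sorting the names
    # ensures the first shard (lowest-numbered file) is chosen.
    model_file = sorted(path.glob('*.gguf'))[0]

print(f"llama.cpp weights detected: {model_file}")
```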