diff --git a/gpt4all-backend/llama.cpp-mainline b/gpt4all-backend/llama.cpp-mainline
index 40bac11e..fadf1135 160000
--- a/gpt4all-backend/llama.cpp-mainline
+++ b/gpt4all-backend/llama.cpp-mainline
@@ -1 +1 @@
-Subproject commit 40bac11e427f2307305b86c322cb366bb95fcb8a
+Subproject commit fadf1135a54e80188d644df42ad6a53bf986e8b0
diff --git a/gpt4all-bindings/python/gpt4all/_pyllmodel.py b/gpt4all-bindings/python/gpt4all/_pyllmodel.py
index f5987c36..0beae663 100644
--- a/gpt4all-bindings/python/gpt4all/_pyllmodel.py
+++ b/gpt4all-bindings/python/gpt4all/_pyllmodel.py
@@ -274,11 +274,12 @@ class LLModel:
 
         all_gpus = self.list_gpus()
         available_gpus = self.list_gpus(mem_required)
-        unavailable_gpus = set(all_gpus).difference(available_gpus)
+        unavailable_gpus = [g for g in all_gpus if g not in available_gpus]
 
-        error_msg = "Unable to initialize model on GPU: {!r}".format(device)
-        error_msg += "\nAvailable GPUs: {}".format(available_gpus)
-        error_msg += "\nUnavailable GPUs due to insufficient memory or features: {}".format(unavailable_gpus)
+        error_msg = (f"Unable to initialize model on GPU: {device!r}"
+                     + f"\nAvailable GPUs: {available_gpus}")
+        if unavailable_gpus:
+            error_msg += f"\nUnavailable GPUs due to insufficient memory: {unavailable_gpus}"
         raise ValueError(error_msg)
 
     def load_model(self) -> bool:
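
For context, a minimal standalone sketch (not part of the patch) of how the rewritten error-message construction behaves. The device and GPU names below are hypothetical placeholders; only the list comprehension and f-string logic mirror the change. Note that the list comprehension, unlike set.difference(), preserves the original GPU ordering and renders as a list rather than a set in the message, and the "Unavailable GPUs" line is now emitted only when at least one GPU was actually excluded.

    # Hypothetical inputs standing in for LLModel.list_gpus() results.
    device = "NVIDIA GeForce GTX 1050"
    all_gpus = ["NVIDIA GeForce RTX 3090", "NVIDIA GeForce GTX 1050"]
    available_gpus = ["NVIDIA GeForce RTX 3090"]

    # Order-preserving replacement for set(all_gpus).difference(available_gpus).
    unavailable_gpus = [g for g in all_gpus if g not in available_gpus]

    error_msg = (f"Unable to initialize model on GPU: {device!r}"
                 + f"\nAvailable GPUs: {available_gpus}")
    if unavailable_gpus:  # line only appended when something was excluded
        error_msg += f"\nUnavailable GPUs due to insufficient memory: {unavailable_gpus}"
    print(error_msg)

Expected output with these placeholder values:

    Unable to initialize model on GPU: 'NVIDIA GeForce GTX 1050'
    Available GPUs: ['NVIDIA GeForce RTX 3090']
    Unavailable GPUs due to insufficient memory: ['NVIDIA GeForce GTX 1050']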