diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 835a58a3..1b950829 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -309,8 +309,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
// We might have had to fallback to CPU after load if the model is not possible to accelerate
// for instance if the quantization method is not supported on Vulkan yet
emit reportDevice("CPU");
- // TODO(cebtenzzre): report somewhere if llamamodel decided the model was not supported
- emit reportFallbackReason("<br>Using CPU: unsupported quantization type");
+ emit reportFallbackReason("<br>Using CPU: unsupported model or quant");
}
MySettings::globalInstance()->setAttemptModelLoad(QString());
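
For context, a minimal sketch of the Qt signal plumbing this hunk relies on. reportDevice and reportFallbackReason are signals on ChatLLM (declared in chatllm.h), and the fallback string is an HTML fragment displayed in the UI, which is why it leads with "<br>". The receiver class and slot below are hypothetical illustrations for how such a signal might be consumed, not the project's actual UI code.

#include <QObject>
#include <QString>

class ChatLLM : public QObject {
    Q_OBJECT
signals:
    // Emitted after a load attempt with the device actually used ("CPU", a GPU name, ...).
    void reportDevice(const QString &device);
    // Emitted with an HTML fragment (e.g. "<br>Using CPU: unsupported model or quant")
    // explaining why the model fell back to CPU; an empty string clears the reason.
    void reportFallbackReason(const QString &fallbackReason);
};

// Hypothetical receiver: appends the fallback reason to the device label shown in the UI.
class DeviceStatusView : public QObject {
    Q_OBJECT
public slots:
    void onFallbackReason(const QString &reason) { m_deviceText += reason; }
private:
    QString m_deviceText;
};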