diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 58d68e60..4ae8c843 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -275,7 +275,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) if (requestedDevice != "CPU") { const size_t requiredMemory = m_llModelInfo.model->requiredMem(filePath.toStdString()); std::vector availableDevices = m_llModelInfo.model->availableGPUDevices(requiredMemory); - if (!availableDevices.empty() && requestedDevice == "Auto") { + if (!availableDevices.empty() && requestedDevice == "Auto" && availableDevices.front().type == 2 /*a discrete gpu*/) { m_llModelInfo.model->initializeGPUDevice(devices.front()); } else { for (LLModel::GPUDevice &d : availableDevices) {