mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2024-10-01 01:06:10 -04:00
Don't try to detect model load error on startup.
Signed-off-by: Adam Treat <treat.adam@gmail.com>
This commit is contained in:
parent
b0c471aed8
commit
fa0a2129dc
@ -274,16 +274,6 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
|
||||
// Store the file info in the modelInfo in case we have an error loading
|
||||
m_llModelInfo.fileInfo = fileInfo;
|
||||
|
||||
// Check if we've previously tried to load this file and failed/crashed
|
||||
if (MySettings::globalInstance()->attemptModelLoad() == filePath) {
|
||||
MySettings::globalInstance()->setAttemptModelLoad(QString()); // clear the flag
|
||||
if (!m_isServer)
|
||||
LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
|
||||
m_llModelInfo = LLModelInfo();
|
||||
emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to insufficient memory. You should either remove this model or decrease your system RAM usage by closing other applications.").arg(modelInfo.filename()));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (fileInfo.exists()) {
|
||||
if (isChatGPT) {
|
||||
QString apiKey;
|
||||
@ -319,9 +309,6 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
|
||||
return m_shouldBeLoaded;
|
||||
});
|
||||
|
||||
// Update the settings that a model is being loaded and update the device list
|
||||
MySettings::globalInstance()->setAttemptModelLoad(filePath);
|
||||
|
||||
// Pick the best match for the device
|
||||
QString actualDevice = m_llModelInfo.model->implementation().buildVariant() == "metal" ? "Metal" : "CPU";
|
||||
const QString requestedDevice = MySettings::globalInstance()->device();
|
||||
@ -373,7 +360,6 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
|
||||
emit reportFallbackReason("<br>model or quant has no GPU support");
|
||||
}
|
||||
|
||||
MySettings::globalInstance()->setAttemptModelLoad(QString());
|
||||
if (!success) {
|
||||
delete m_llModelInfo.model;
|
||||
m_llModelInfo.model = nullptr;
|
||||
|
@ -717,24 +717,3 @@ void MySettings::setNetworkUsageStatsActive(bool b)
|
||||
setting.sync();
|
||||
emit networkUsageStatsActiveChanged();
|
||||
}
|
||||
|
||||
// Returns the model file path recorded by a previous load attempt, or an
// empty string when no attempt is currently recorded. A non-empty value on
// startup indicates the prior load never completed (likely a crash).
QString MySettings::attemptModelLoad() const
{
    QSettings settings;
    settings.sync(); // pick up any value written by another process/run
    const QString recordedPath = settings.value("attemptModelLoad", QString()).toString();
    return recordedPath;
}
|
||||
|
||||
void MySettings::setAttemptModelLoad(const QString &modelFile)
|
||||
{
|
||||
if (attemptModelLoad() == modelFile)
|
||||
return;
|
||||
|
||||
QSettings setting;
|
||||
if (modelFile.isEmpty())
|
||||
setting.remove("attemptModelLoad");
|
||||
else
|
||||
setting.setValue("attemptModelLoad", modelFile);
|
||||
setting.sync();
|
||||
emit attemptModelLoadChanged();
|
||||
}
|
||||
|
@ -110,8 +110,6 @@ public:
|
||||
bool networkUsageStatsActive() const;
|
||||
void setNetworkUsageStatsActive(bool b);
|
||||
|
||||
QString attemptModelLoad() const;
|
||||
void setAttemptModelLoad(const QString &modelFile);
|
||||
|
||||
QVector<QString> deviceList() const;
|
||||
void setDeviceList(const QVector<QString> &deviceList);
|
||||
|
Loading…
Reference in New Issue
Block a user