Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2024-10-01 01:06:10 -04:00)
localdocs: load model before checking which model is loaded (#2284)
* localdocs: load model before checking what we loaded

Fixes "WARNING: Request to generate sync embeddings for non-local model invalid"

Signed-off-by: Jared Van Bortel <jared@nomic.ai>

* fix inverted assertion

Signed-off-by: Jared Van Bortel <jared@nomic.ai>

---------

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
parent adaecb7a72
commit 855fd22417
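The change is easiest to see in miniature. Below is a minimal, self-contained sketch of the ordering problem the commit message describes: the worker used to be asked which backend it was using before any model had been loaded, so the check could not answer correctly; the fix is to load the model first and only then branch on what was loaded. All names here are simplified stand-ins, not the actual gpt4all classes or API.

// Minimal sketch of the load-before-check ordering (hypothetical names,
// not the real gpt4all code).
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct Worker {
    std::optional<std::string> model;  // unset until loadModel() succeeds

    bool hasModel() const { return model.has_value(); }
    bool loadModel()      { model = "local-embedding-model"; return true; }

    // Only meaningful once a model is loaded -- checking it earlier was the bug.
    bool isNomic() const  { return model && *model == "nomic-api"; }

    std::vector<float> generateEmbeddings(const std::string &text) {
        (void)text;  // a real implementation would embed the text
        // Fixed order: make sure a model is loaded before inspecting it.
        if (!hasModel() && !loadModel()) {
            std::cerr << "WARNING: Could not load model for embeddings\n";
            return {};
        }
        if (!isNomic())
            return std::vector<float>(4, 0.0f);  // local path: embed in-process
        return {};  // remote path: would be handed off to the API worker
    }
};

int main() {
    Worker w;
    std::cout << w.generateEmbeddings("hello world").size() << "\n";  // prints 4
}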
@@ -97,16 +97,7 @@ bool EmbeddingLLMWorker::isNomic() const
 // this function is always called for retrieval tasks
 std::vector<float> EmbeddingLLMWorker::generateSyncEmbedding(const QString &text)
 {
-    if (!hasModel() && !loadModel()) {
-        qWarning() << "WARNING: Could not load model for embeddings";
-        return {};
-    }
-
-    if (isNomic()) {
-        qWarning() << "WARNING: Request to generate sync embeddings for non-local model invalid";
-        return {};
-    }
-
+    Q_ASSERT(!isNomic());
     std::vector<float> embedding(m_model->embeddingSize());
     try {
         m_model->embed({text.toStdString()}, embedding.data(), true);
@@ -306,16 +297,21 @@ EmbeddingLLM::~EmbeddingLLM()
 
 std::vector<float> EmbeddingLLM::generateEmbeddings(const QString &text)
 {
+    if (!m_embeddingWorker->hasModel() && !m_embeddingWorker->loadModel()) {
+        qWarning() << "WARNING: Could not load model for embeddings";
+        return {};
+    }
+
     if (!m_embeddingWorker->isNomic()) {
         return m_embeddingWorker->generateSyncEmbedding(text);
-    } else {
-        EmbeddingLLMWorker worker;
-        connect(this, &EmbeddingLLM::requestSyncEmbedding, &worker,
-            &EmbeddingLLMWorker::requestSyncEmbedding, Qt::QueuedConnection);
-        emit requestSyncEmbedding(text);
-        worker.wait();
-        return worker.lastResponse();
     }
+
+    EmbeddingLLMWorker worker;
+    connect(this, &EmbeddingLLM::requestSyncEmbedding, &worker,
+        &EmbeddingLLMWorker::requestSyncEmbedding, Qt::QueuedConnection);
+    emit requestSyncEmbedding(text);
+    worker.wait();
+    return worker.lastResponse();
 }
 
 void EmbeddingLLM::generateAsyncEmbeddings(const QVector<EmbeddingChunk> &chunks)
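For the non-local (Nomic API) path, the rewritten generateEmbeddings still blocks the caller while the work runs on another thread: it emits requestSyncEmbedding over a queued connection, waits on the worker, and reads lastResponse(). The sketch below shows that queue-then-wait pattern with stock Qt primitives; QMetaObject::invokeMethod and QSemaphore stand in for the project's signal/slot and wait() plumbing, so treat it as an illustration of the idea, not the gpt4all implementation.

// Sketch: run work on an object living in another thread via a queued
// call, then block the caller until the result is ready.
#include <QCoreApplication>
#include <QThread>
#include <QSemaphore>
#include <QObject>
#include <QDebug>
#include <vector>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    QThread workerThread;
    QObject worker;                      // stand-in for EmbeddingLLMWorker
    worker.moveToThread(&workerThread);
    workerThread.start();                // runs its own event loop

    std::vector<float> lastResponse;
    QSemaphore done;

    // Qt::QueuedConnection makes the lambda execute on workerThread's event
    // loop, mirroring the queued requestSyncEmbedding connection in the diff.
    QMetaObject::invokeMethod(&worker, [&]() {
        lastResponse = {0.1f, 0.2f, 0.3f};   // pretend this is the embedding
        done.release();
    }, Qt::QueuedConnection);

    done.acquire();                      // plays the role of worker.wait()
    qDebug() << "embedding size:" << int(lastResponse.size());

    workerThread.quit();
    workerThread.wait();
    return 0;
}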