#include "embllm.h"

#include "modellist.h"
#include "mysettings.h"

#include "../gpt4all-backend/llmodel.h"

#include <QCoreApplication>
#include <QDebug>
#include <QFile>
#include <QFileInfo>
#include <QGuiApplication>
#include <QIODevice>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QList>
#include <QMutexLocker>
#include <QNetworkAccessManager>
#include <QNetworkReply>
#include <QNetworkRequest>
#include <QUrl>
#include <Qt>
#include <QtGlobal>
#include <QtLogging>

#include <algorithm> // std::min
#include <cstring>   // memcpy
#include <exception>
#include <utility>
#include <vector>

using namespace Qt::Literals::StringLiterals;

static const QString EMBEDDING_MODEL_NAME = u"nomic-embed-text-v1.5"_s;
static const QString LOCAL_EMBEDDING_MODEL = u"nomic-embed-text-v1.5.f16.gguf"_s;
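
// Worker that computes embeddings either with a local GGUF model or via the
// Nomic Atlas HTTP API. It moves itself onto a dedicated QThread so that
// model loading and inference never block the caller's thread.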
EmbeddingLLMWorker::EmbeddingLLMWorker()
    : QObject(nullptr)
    , m_networkManager(new QNetworkAccessManager(this))
    , m_stopGenerating(false)
{
    moveToThread(&m_workerThread);
    connect(this, &EmbeddingLLMWorker::requestAtlasQueryEmbedding, this, &EmbeddingLLMWorker::atlasQueryEmbeddingRequested);
    connect(this, &EmbeddingLLMWorker::finished, &m_workerThread, &QThread::quit, Qt::DirectConnection);
    m_workerThread.setObjectName("embedding");
    m_workerThread.start();
}

EmbeddingLLMWorker::~EmbeddingLLMWorker()
{
    m_stopGenerating = true;
    m_workerThread.quit();
    m_workerThread.wait();

    if (m_model) {
        delete m_model;
        m_model = nullptr;
    }
}

void EmbeddingLLMWorker::wait()
{
    m_workerThread.wait();
}
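
// Prepares the worker for embedding. In remote mode this only records the
// Nomic API key; in local mode it locates the bundled GGUF model, picks a
// backend/device from the LocalDocs settings, and falls back to CPU if GPU
// loading fails.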
bool EmbeddingLLMWorker::loadModel()
{
    constexpr int n_ctx = 2048;

    m_nomicAPIKey.clear();
    m_model = nullptr;

    // TODO(jared): react to setting changes without restarting

    if (MySettings::globalInstance()->localDocsUseRemoteEmbed()) {
        m_nomicAPIKey = MySettings::globalInstance()->localDocsNomicAPIKey();
        return true;
    }

#ifdef Q_OS_DARWIN
    static const QString embPathFmt = u"%1/../Resources/%2"_s;
#else
    static const QString embPathFmt = u"%1/../resources/%2"_s;
#endif

    QString filePath = embPathFmt.arg(QCoreApplication::applicationDirPath(), LOCAL_EMBEDDING_MODEL);
    if (!QFileInfo::exists(filePath)) {
        qWarning() << "embllm WARNING: Local embedding model not found";
        return false;
    }

    QString requestedDevice = MySettings::globalInstance()->localDocsEmbedDevice();
    std::string backend = "auto";
#ifdef Q_OS_MAC
    if (requestedDevice == "Auto" || requestedDevice == "CPU")
        backend = "cpu";
#else
    if (requestedDevice.startsWith("CUDA: "))
        backend = "cuda";
#endif

    try {
        m_model = LLModel::Implementation::construct(filePath.toStdString(), backend, n_ctx);
    } catch (const std::exception &e) {
        qWarning() << "embllm WARNING: Could not load embedding model:" << e.what();
        return false;
    }

    bool actualDeviceIsCPU = true;

#if defined(Q_OS_MAC) && defined(__aarch64__)
    if (m_model->implementation().buildVariant() == "metal")
        actualDeviceIsCPU = false;
#else
    if (requestedDevice != "CPU") {
        const LLModel::GPUDevice *device = nullptr;
        std::vector<LLModel::GPUDevice> availableDevices = m_model->availableGPUDevices(0);
        if (requestedDevice != "Auto") {
            // Use the selected device
            for (const LLModel::GPUDevice &d : availableDevices) {
                if (QString::fromStdString(d.selectionName()) == requestedDevice) {
                    device = &d;
                    break;
                }
            }
        }

        std::string unavail_reason;
        if (!device) {
            // GPU not available
        } else if (!m_model->initializeGPUDevice(device->index, &unavail_reason)) {
            qWarning().noquote() << "embllm WARNING: Did not use GPU:" << QString::fromStdString(unavail_reason);
        } else {
            actualDeviceIsCPU = false;
        }
    }
#endif

    bool success = m_model->loadModel(filePath.toStdString(), n_ctx, /*ngl*/ 100);

    // CPU fallback
    if (!actualDeviceIsCPU && !success) {
        // llama_init_from_file returned nullptr
        qWarning() << "embllm WARNING: Did not use GPU: GPU loading failed (out of VRAM?)";

        if (backend == "cuda") {
            // For CUDA, make sure we don't use the GPU at all - ngl=0 still offloads matmuls
            delete m_model; // discard the CUDA-backed instance before constructing a new one
            try {
                m_model = LLModel::Implementation::construct(filePath.toStdString(), "auto", n_ctx);
            } catch (const std::exception &e) {
                qWarning() << "embllm WARNING: Could not load embedding model:" << e.what();
                m_model = nullptr;
                return false;
            }
        }

        success = m_model->loadModel(filePath.toStdString(), n_ctx, /*ngl*/ 0);
    }

    if (!success) {
        qWarning() << "embllm WARNING: Could not load embedding model";
        delete m_model;
        m_model = nullptr;
        return false;
    }

    if (!m_model->supportsEmbedding()) {
        qWarning() << "embllm WARNING: Model type does not support embeddings";
        delete m_model;
        m_model = nullptr;
        return false;
    }

    // FIXME(jared): the user may want this to take effect without having to restart
    int n_threads = MySettings::globalInstance()->threadCount();
    m_model->setThreadCount(n_threads);

    return true;
}
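
// Synchronously embeds a retrieval query. Local models are invoked directly
// under the mutex; for Nomic Atlas a temporary worker issues the HTTP request
// and this call blocks until its reply arrives.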
std::vector<float> EmbeddingLLMWorker::generateQueryEmbedding(const QString &text)
{
    {
        QMutexLocker locker(&m_mutex);

        if (!hasModel() && !loadModel()) {
            qWarning() << "WARNING: Could not load model for embeddings";
            return {};
        }

        if (!isNomic()) {
            std::vector<float> embedding(m_model->embeddingSize());

            try {
                m_model->embed({text.toStdString()}, embedding.data(), /*isRetrieval*/ true);
            } catch (const std::exception &e) {
                qWarning() << "WARNING: LLModel::embed failed:" << e.what();
                return {};
            }

            return embedding;
        }
    }

    EmbeddingLLMWorker worker;
    emit worker.requestAtlasQueryEmbedding(text);
    worker.wait();
    return worker.lastResponse();
}
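
// POSTs a batch of texts to the Atlas embedding endpoint. userData (the
// originating chunks, if any) rides along on the request so handleFinished()
// can route the reply.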
void EmbeddingLLMWorker::sendAtlasRequest(const QStringList &texts, const QString &taskType, const QVariant &userData)
{
    QJsonObject root;
    root.insert("model", "nomic-embed-text-v1");
    root.insert("texts", QJsonArray::fromStringList(texts));
    root.insert("task_type", taskType);

    QJsonDocument doc(root);

    QUrl nomicUrl("https://api-atlas.nomic.ai/v1/embedding/text");
    const QString authorization = u"Bearer %1"_s.arg(m_nomicAPIKey).trimmed();
    QNetworkRequest request(nomicUrl);
    request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
    request.setRawHeader("Authorization", authorization.toUtf8());
    request.setAttribute(QNetworkRequest::User, userData);
    QNetworkReply *reply = m_networkManager->post(request, doc.toJson(QJsonDocument::Compact));
    connect(qGuiApp, &QCoreApplication::aboutToQuit, reply, &QNetworkReply::abort);
    connect(reply, &QNetworkReply::finished, this, &EmbeddingLLMWorker::handleFinished);
}
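
// Slot for requestAtlasQueryEmbedding; only valid when a remote (Nomic)
// backend is configured.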
void EmbeddingLLMWorker::atlasQueryEmbeddingRequested(const QString &text)
{
    {
        QMutexLocker locker(&m_mutex);
        if (!hasModel() && !loadModel()) {
            qWarning() << "WARNING: Could not load model for embeddings";
            return;
        }

        if (!isNomic()) {
            qWarning() << "WARNING: Request to generate sync embeddings for local model invalid";
            return;
        }

        Q_ASSERT(hasModel());
    }

    sendAtlasRequest({text}, "search_query");
}
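
// Slot for requestDocEmbeddings: embeds document chunks either locally in
// small batches or via a single Atlas request.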
void EmbeddingLLMWorker::docEmbeddingsRequested(const QVector<EmbeddingChunk> &chunks)
{
    if (m_stopGenerating)
        return;

    bool isNomic;
    {
        QMutexLocker locker(&m_mutex);
        if (!hasModel() && !loadModel()) {
            qWarning() << "WARNING: Could not load model for embeddings";
            return;
        }

        isNomic = this->isNomic();
    }

    if (!isNomic) {
        QVector<EmbeddingResult> results;
        results.reserve(chunks.size());
        std::vector<std::string> texts;
        texts.reserve(chunks.size());
        for (const auto &c: chunks) {
            EmbeddingResult result;
            result.model = c.model;
            result.folder_id = c.folder_id;
            result.chunk_id = c.chunk_id;
            result.embedding.resize(m_model->embeddingSize());
            results << result;
            texts.push_back(c.chunk.toStdString());
        }

        constexpr int BATCH_SIZE = 4;
        std::vector<float> result;
        result.resize(chunks.size() * m_model->embeddingSize());
        for (int j = 0; j < chunks.size(); j += BATCH_SIZE) {
            QMutexLocker locker(&m_mutex);
            std::vector batchTexts(texts.begin() + j, texts.begin() + std::min(j + BATCH_SIZE, int(texts.size())));
            try {
                m_model->embed(batchTexts, result.data() + j * m_model->embeddingSize(), /*isRetrieval*/ false);
            } catch (const std::exception &e) {
                qWarning() << "WARNING: LLModel::embed failed:" << e.what();
                return;
            }
        }
        for (int i = 0; i < chunks.size(); i++)
            memcpy(results[i].embedding.data(), &result[i * m_model->embeddingSize()], m_model->embeddingSize() * sizeof(float));

        emit embeddingsGenerated(results);
        return;
    }

    QStringList texts;
    for (auto &c: chunks)
        texts.append(c.chunk);
    sendAtlasRequest(texts, "search_document", QVariant::fromValue(chunks));
}
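
// Flattens a JSON array of embedding arrays into a single float vector.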
std::vector<float> jsonArrayToVector(const QJsonArray &jsonArray)
{
    std::vector<float> result;

    for (const auto &innerValue: jsonArray) {
        if (innerValue.isArray()) {
            QJsonArray innerArray = innerValue.toArray();
            result.reserve(result.size() + innerArray.size());
            for (const auto &value: innerArray) {
                result.push_back(static_cast<float>(value.toDouble()));
            }
        }
    }

    return result;
}
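
// Pairs each input chunk with its embedding from the Atlas response,
// preserving model, folder_id, and chunk_id.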
QVector<EmbeddingResult> jsonArrayToEmbeddingResults(const QVector<EmbeddingChunk> &chunks, const QJsonArray &embeddings)
{
    QVector<EmbeddingResult> results;

    if (chunks.size() != embeddings.size()) {
        qWarning() << "WARNING: Size of json array result does not match input!";
        return results;
    }

    for (int i = 0; i < chunks.size(); ++i) {
        const EmbeddingChunk &chunk = chunks.at(i);
        const QJsonArray embeddingArray = embeddings.at(i).toArray();

        std::vector<float> embeddingVector;
        for (const auto &value: embeddingArray)
            embeddingVector.push_back(static_cast<float>(value.toDouble()));

        EmbeddingResult result;
        result.model = chunk.model;
        result.folder_id = chunk.folder_id;
        result.chunk_id = chunk.chunk_id;
        result.embedding = std::move(embeddingVector);
        results.push_back(std::move(result));
    }

    return results;
}
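
// Handles an Atlas reply: reports HTTP/parse errors, otherwise emits
// embeddingsGenerated() for document chunks or stores the query embedding in
// m_lastResponse and emits finished().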
void EmbeddingLLMWorker::handleFinished()
{
    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());
    if (!reply)
        return;

    QVariant retrievedData = reply->request().attribute(QNetworkRequest::User);
    QVector<EmbeddingChunk> chunks;
    if (retrievedData.isValid() && retrievedData.canConvert<QVector<EmbeddingChunk>>())
        chunks = retrievedData.value<QVector<EmbeddingChunk>>();

    QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
    Q_ASSERT(response.isValid());
    bool ok;
    int code = response.toInt(&ok);
    if (!ok || code != 200) {
        QString errorDetails;
        QString replyErrorString = reply->errorString().trimmed();
        QByteArray replyContent = reply->readAll().trimmed();
        errorDetails = u"ERROR: Nomic Atlas responded with error code \"%1\""_s.arg(code);
        if (!replyErrorString.isEmpty())
            errorDetails += u". Error Details: \"%1\""_s.arg(replyErrorString);
        if (!replyContent.isEmpty())
            errorDetails += u". Response Content: \"%1\""_s.arg(QString::fromUtf8(replyContent));
        qWarning() << errorDetails;
        emit errorGenerated(chunks, errorDetails);
        reply->deleteLater(); // avoid leaking the reply on the error path
        return;
    }

    QByteArray jsonData = reply->readAll();

    QJsonParseError err;
    QJsonDocument document = QJsonDocument::fromJson(jsonData, &err);
    if (err.error != QJsonParseError::NoError) {
        qWarning() << "ERROR: Couldn't parse Nomic Atlas response:" << jsonData << err.errorString();
        reply->deleteLater(); // avoid leaking the reply on the error path
        return;
    }

    const QJsonObject root = document.object();
    const QJsonArray embeddings = root.value("embeddings").toArray();

    if (!chunks.isEmpty()) {
        emit embeddingsGenerated(jsonArrayToEmbeddingResults(chunks, embeddings));
    } else {
        m_lastResponse = jsonArrayToVector(embeddings);
        emit finished();
    }

    reply->deleteLater();
}
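
// Thin facade over EmbeddingLLMWorker; all cross-thread communication uses
// queued signal/slot connections.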
EmbeddingLLM::EmbeddingLLM()
    : QObject(nullptr)
    , m_embeddingWorker(new EmbeddingLLMWorker)
{
    connect(this, &EmbeddingLLM::requestDocEmbeddings, m_embeddingWorker,
        &EmbeddingLLMWorker::docEmbeddingsRequested, Qt::QueuedConnection);
    connect(m_embeddingWorker, &EmbeddingLLMWorker::embeddingsGenerated, this,
        &EmbeddingLLM::embeddingsGenerated, Qt::QueuedConnection);
    connect(m_embeddingWorker, &EmbeddingLLMWorker::errorGenerated, this,
        &EmbeddingLLM::errorGenerated, Qt::QueuedConnection);
}

EmbeddingLLM::~EmbeddingLLM()
{
    delete m_embeddingWorker;
    m_embeddingWorker = nullptr;
}

QString EmbeddingLLM::model()
{
    return EMBEDDING_MODEL_NAME;
}

// TODO(jared): embed using all necessary embedding models given collection
std::vector<float> EmbeddingLLM::generateQueryEmbedding(const QString &text)
{
    return m_embeddingWorker->generateQueryEmbedding(text);
}

void EmbeddingLLM::generateDocEmbeddingsAsync(const QVector<EmbeddingChunk> &chunks)
{
    emit requestDocEmbeddings(chunks);
}
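
// A minimal usage sketch (hypothetical caller, not part of this file):
//
//     EmbeddingLLM embLLM;
//     // blocking query embedding for retrieval
//     std::vector<float> query = embLLM.generateQueryEmbedding(u"some query"_s);
//     // asynchronous document embeddings; results arrive via the
//     // embeddingsGenerated()/errorGenerated() signals
//     embLLM.generateDocEmbeddingsAsync(chunks);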