#include "chat.h"
#include "llm.h"
#include "network.h"
#include "download.h"
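
// Constructs a normal chat backed by its own ChatLLM worker.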
Chat::Chat(QObject *parent)
    : QObject(parent)
    , m_id(Network::globalInstance()->generateUniqueId())
    , m_name(tr("New Chat"))
    , m_chatModel(new ChatModel(this))
    , m_responseInProgress(false)
    , m_creationDate(QDateTime::currentSecsSinceEpoch())
    , m_llmodel(new ChatLLM(this))
    , m_isServer(false)
    , m_shouldDeleteLater(false)
{
    connectLLM();
}
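
// Constructs the special local-server chat, backed by a Server worker (a ChatLLM subclass).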
Chat::Chat(bool isServer, QObject *parent)
    : QObject(parent)
    , m_id(Network::globalInstance()->generateUniqueId())
    , m_name(tr("Server Chat"))
    , m_chatModel(new ChatModel(this))
    , m_responseInProgress(false)
    , m_creationDate(QDateTime::currentSecsSinceEpoch())
    , m_llmodel(new Server(this))
    , m_isServer(true)
    , m_shouldDeleteLater(false)
{
    connectLLM();
}

Chat::~Chat()
{
    delete m_llmodel;
    m_llmodel = nullptr;
}
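
// Wires this Chat to its ChatLLM worker. The connection types matter: same-thread signals use
// Qt::DirectConnection, cross-thread ones use Qt::QueuedConnection, and the few calls the GUI
// must wait on use Qt::BlockingQueuedConnection.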
void Chat::connectLLM()
{
    // Should be in same thread
    connect(Download::globalInstance(), &Download::modelListChanged, this, &Chat::modelListChanged, Qt::DirectConnection);
    connect(this, &Chat::modelNameChanged, this, &Chat::modelListChanged, Qt::DirectConnection);

    // Should be in different threads
    connect(m_llmodel, &ChatLLM::isModelLoadedChanged, this, &Chat::isModelLoadedChanged, Qt::QueuedConnection);
    connect(m_llmodel, &ChatLLM::isModelLoadedChanged, this, &Chat::handleModelLoadedChanged, Qt::QueuedConnection);
    connect(m_llmodel, &ChatLLM::responseChanged, this, &Chat::handleResponseChanged, Qt::QueuedConnection);
    connect(m_llmodel, &ChatLLM::responseStarted, this, &Chat::responseStarted, Qt::QueuedConnection);
    connect(m_llmodel, &ChatLLM::responseStopped, this, &Chat::responseStopped, Qt::QueuedConnection);
    connect(m_llmodel, &ChatLLM::modelNameChanged, this, &Chat::handleModelNameChanged, Qt::QueuedConnection);
    connect(m_llmodel, &ChatLLM::modelLoadingError, this, &Chat::modelLoadingError, Qt::QueuedConnection);
    connect(m_llmodel, &ChatLLM::recalcChanged, this, &Chat::handleRecalculating, Qt::QueuedConnection);
    connect(m_llmodel, &ChatLLM::generatedNameChanged, this, &Chat::generatedNameChanged, Qt::QueuedConnection);

    connect(this, &Chat::promptRequested, m_llmodel, &ChatLLM::prompt, Qt::QueuedConnection);
    connect(this, &Chat::modelNameChangeRequested, m_llmodel, &ChatLLM::modelNameChangeRequested, Qt::QueuedConnection);
    connect(this, &Chat::loadDefaultModelRequested, m_llmodel, &ChatLLM::loadDefaultModel, Qt::QueuedConnection);
    connect(this, &Chat::loadModelRequested, m_llmodel, &ChatLLM::loadModel, Qt::QueuedConnection);
    connect(this, &Chat::generateNameRequested, m_llmodel, &ChatLLM::generateName, Qt::QueuedConnection);

    // The following are blocking operations and will block the GUI thread; therefore they must be
    // fast to respond to
    connect(this, &Chat::regenerateResponseRequested, m_llmodel, &ChatLLM::regenerateResponse, Qt::BlockingQueuedConnection);
    connect(this, &Chat::resetResponseRequested, m_llmodel, &ChatLLM::resetResponse, Qt::BlockingQueuedConnection);
    connect(this, &Chat::resetContextRequested, m_llmodel, &ChatLLM::resetContext, Qt::BlockingQueuedConnection);
}
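
// Fully resets the chat: stops any generation, removes the on-disk representation, assigns a
// fresh id, and clears the chat model. See the NOTE below for why the name and creation date
// survive the reset.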
void Chat::reset()
{
    stopGenerating();
    // Erase our current on-disk representation as we're completely resetting the chat along with
    // its id
    LLM::globalInstance()->chatListModel()->removeChatFile(this);
    emit resetContextRequested(); // blocking queued connection
    m_id = Network::globalInstance()->generateUniqueId();
    emit idChanged();
    // NOTE: We deliberately do not reset the name or creation date, to indicate that this was
    // originally an older chat that was reset for another purpose. Resetting this data would cause
    // the chat name label to change back to 'New Chat' and the chat to show up as a 'New Chat'
    // further down in the chat model list, which might surprise the user. In the future, we might
    // get rid of the "reset context" button in the UI. Right now, changing the model in the
    // combobox dropdown effectively does a reset context. We *have* to do this right now when
    // switching between different types of models. The only way to avoid it would be a very long
    // recalculation in which we rebuild the context when switching between different types of
    // models. Probably the right fix is to allow switching models but throw up a dialog warning
    // users that switching between types of models will entail a long recalculation.
    m_chatModel->clear();
}

bool Chat::isModelLoaded() const
{
    return m_llmodel->isModelLoaded();
}
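
// Forwards the prompt and its sampling parameters to the worker; the queued connection keeps the
// GUI responsive while the model generates.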
void Chat::prompt(const QString &prompt, const QString &prompt_template, int32_t n_predict,
                  int32_t top_k, float top_p, float temp, int32_t n_batch, float repeat_penalty,
                  int32_t repeat_penalty_tokens)
{
    emit promptRequested(prompt, prompt_template, n_predict, top_k, top_p, temp, n_batch,
        repeat_penalty, repeat_penalty_tokens, LLM::globalInstance()->threadCount());
}

void Chat::regenerateResponse()
{
    emit regenerateResponseRequested(); // blocking queued connection
}

void Chat::stopGenerating()
{
    m_llmodel->stopGenerating();
}

QString Chat::response() const
{
    return m_llmodel->response();
}
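
// Mirrors the worker's partial response into the last chat model item so the view updates live
// as tokens stream in.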
void Chat::handleResponseChanged()
{
    const int index = m_chatModel->count() - 1;
    m_chatModel->updateValue(index, response());
    emit responseChanged();
}
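
// Finishes a deferred deletion once the model load state changes; see unloadAndDeleteLater().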
void Chat::handleModelLoadedChanged()
{
    if (m_shouldDeleteLater)
        deleteLater();
}

void Chat::responseStarted()
{
    m_responseInProgress = true;
    emit responseInProgressChanged();
}
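
// Called when generation finishes: kicks off name generation if the chat is still unnamed, and
// reports the start of a new conversation (fewer than three chat items) to the network layer.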
void Chat::responseStopped()
{
    m_responseInProgress = false;
    emit responseInProgressChanged();
    if (m_llmodel->generatedName().isEmpty())
        emit generateNameRequested();
    if (chatModel()->count() < 3)
        Network::globalInstance()->sendChatStarted();
}

QString Chat::modelName() const
{
    return m_llmodel->modelName();
}

void Chat::setModelName(const QString &modelName)
{
    // This doesn't block, but it will unload the old model and load the new one, which the GUI
    // can observe through changes to the isModelLoaded property
    emit modelNameChangeRequested(modelName);
}
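
// Appends a new prompt/response pair and resets the worker's response buffer; the blocking
// queued connection ensures the reset has completed before this call returns.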
void Chat::newPromptResponsePair(const QString &prompt)
{
    m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);
    m_chatModel->appendPrompt(tr("Prompt: "), prompt);
    m_chatModel->appendResponse(tr("Response: "), prompt);
    emit resetResponseRequested(); // blocking queued connection
}
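
// Server variant of newPromptResponsePair(); unlike the GUI path, it does not reset the response
// buffer here.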
void Chat::serverNewPromptResponsePair(const QString &prompt)
{
    m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);
    m_chatModel->appendPrompt(tr("Prompt: "), prompt);
    m_chatModel->appendResponse(tr("Response: "), prompt);
}

bool Chat::isRecalc() const
{
    return m_llmodel->isRecalc();
}

void Chat::loadDefaultModel()
{
    emit loadDefaultModelRequested();
}

void Chat::loadModel(const QString &modelName)
{
    emit loadModelRequested(modelName);
}
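
// Safely disposes of this chat: delete immediately if no model is loaded, otherwise flag for
// deletion and let handleModelLoadedChanged() delete us once the unload completes.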
void Chat::unloadAndDeleteLater()
{
    if (!isModelLoaded()) {
        deleteLater();
        return;
    }

    m_shouldDeleteLater = true;
    unloadModel();
}

void Chat::unloadModel()
{
    stopGenerating();
    m_llmodel->setShouldBeLoaded(false);
}

void Chat::reloadModel()
{
    m_llmodel->setShouldBeLoaded(true);
}

void Chat::generatedNameChanged()
{
    // Only use the first three words maximum and remove newlines and extra spaces
    QString gen = m_llmodel->generatedName().simplified();
    QStringList words = gen.split(' ', Qt::SkipEmptyParts);
    int wordCount = qMin(3, words.size());
    m_name = words.mid(0, wordCount).join(' ');
    emit nameChanged();
}
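
// Relays the worker's context-recalculation notification to the GUI and reports it, along with
// the current message count, to the network layer.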
void Chat::handleRecalculating()
{
    Network::globalInstance()->sendRecalculatingContext(m_chatModel->count());
    emit recalcChanged();
}

void Chat::handleModelNameChanged()
{
    m_savedModelName = modelName();
    emit modelNameChanged();
}
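
// Writes the chat metadata followed by the worker and chat model state. The field order must
// stay in sync with deserialize().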
bool Chat::serialize(QDataStream &stream, int version) const
{
    stream << m_creationDate;
    stream << m_id;
    stream << m_name;
    stream << m_userName;
    stream << m_savedModelName;
    if (!m_llmodel->serialize(stream, version))
        return false;
    if (!m_chatModel->serialize(stream, version))
        return false;
    return stream.status() == QDataStream::Ok;
}
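
// Reads back a chat written by serialize(), emitting change signals so the GUI reflects the
// restored values.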
bool Chat::deserialize(QDataStream &stream, int version)
{
    stream >> m_creationDate;
    stream >> m_id;
    emit idChanged();
    stream >> m_name;
    stream >> m_userName;
    emit nameChanged();
    stream >> m_savedModelName;
    // Prior to version 2, GPT-J models had a bug that fixed the kv_cache to F32 instead of F16,
    // so unfortunately we cannot deserialize these
    if (version < 2 && m_savedModelName.contains("gpt4all-j"))
        return false;
    if (!m_llmodel->deserialize(stream, version))
        return false;
    if (!m_chatModel->deserialize(stream, version))
        return false;
    emit chatModelChanged();
    return stream.status() == QDataStream::Ok;
}
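
// Scans for ggml-*.bin model files next to the executable and in the download directory,
// de-duplicated, with the current (or default) model placed first.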
QList<QString> Chat::modelList() const
{
    // Build a model list from the exe path and from the local path
    QList<QString> list;

    QString exePath = QCoreApplication::applicationDirPath() + QDir::separator();
    QString localPath = Download::globalInstance()->downloadLocalModelsPath();

    QSettings settings;
    settings.sync();
    // The user default model can be set by the user in the settings dialog. The "default" user
    // default model is "Application default", which signals we should use the default model that
    // was specified by the models.json file.
    QString defaultModel = settings.value("userDefaultModel").toString();
    if (defaultModel.isEmpty() || defaultModel == "Application default")
        defaultModel = settings.value("defaultModel").toString();

    QString currentModelName = modelName().isEmpty() ? defaultModel : modelName();

    {
        QDir dir(exePath);
        dir.setNameFilters(QStringList() << "ggml-*.bin");
        QStringList fileNames = dir.entryList();
        for (const QString &f : fileNames) {
            QString filePath = exePath + f;
            QFileInfo info(filePath);
            QString name = info.completeBaseName().remove(0, 5);
            if (info.exists()) {
                if (name == currentModelName)
                    list.prepend(name);
                else
                    list.append(name);
            }
        }
    }

    if (localPath != exePath) {
        QDir dir(localPath);
        dir.setNameFilters(QStringList() << "ggml-*.bin");
        QStringList fileNames = dir.entryList();
        for (const QString &f : fileNames) {
            QString filePath = localPath + f;
            QFileInfo info(filePath);
            QString name = info.completeBaseName().remove(0, 5);
            if (info.exists() && !list.contains(name)) { // don't allow duplicates
                if (name == currentModelName)
                    list.prepend(name);
                else
                    list.append(name);
            }
        }
    }

    if (list.isEmpty()) {
        if (exePath != localPath) {
            qWarning() << "ERROR: Could not find any applicable models in"
                       << exePath << "or" << localPath;
        } else {
            qWarning() << "ERROR: Could not find any applicable models in"
                       << exePath;
        }
        return QList<QString>();
    }

    return list;
}