// gpt4all/gpt4all-chat/main.cpp — entry point for the GPT4All chat application

#include <QDirIterator>
#include <QGuiApplication>
#include <QQmlApplicationEngine>
#include <QQmlContext>
#include "llm.h"
#include "modellist.h"
#include "chatlistmodel.h"
#include "localdocs.h"
#include "download.h"
#include "network.h"
#include "mysettings.h"
#include "config.h"
#include "logger.h"
#include "../gpt4all-backend/llmodel.h"

int main(int argc, char *argv[])
{
2023-04-19 01:10:06 +00:00
QCoreApplication::setOrganizationName("nomic.ai");
QCoreApplication::setOrganizationDomain("gpt4all.io");
QCoreApplication::setApplicationName("GPT4All");
2023-04-16 23:20:43 +00:00
QCoreApplication::setApplicationVersion(APP_VERSION);
2023-06-01 14:50:42 +00:00
Logger::globalInstance();
2023-04-09 03:28:39 +00:00
QGuiApplication app(argc, argv);
QQmlApplicationEngine engine;
QString llmodelSearchPaths = QCoreApplication::applicationDirPath();
const QString libDir = QCoreApplication::applicationDirPath() + "/../lib/";
if (LLM::directoryExists(libDir))
llmodelSearchPaths += ";" + libDir;
#if defined(Q_OS_MAC)
const QString binDir = QCoreApplication::applicationDirPath() + "/../../../";
if (LLM::directoryExists(binDir))
llmodelSearchPaths += ";" + binDir;
const QString frameworksDir = QCoreApplication::applicationDirPath() + "/../Frameworks/";
if (LLM::directoryExists(frameworksDir))
llmodelSearchPaths += ";" + frameworksDir;
#endif
LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
2023-06-27 15:54:34 +00:00
qmlRegisterSingletonInstance("mysettings", 1, 0, "MySettings", MySettings::globalInstance());
2023-06-22 19:44:49 +00:00
qmlRegisterSingletonInstance("modellist", 1, 0, "ModelList", ModelList::globalInstance());
qmlRegisterSingletonInstance("chatlistmodel", 1, 0, "ChatListModel", ChatListModel::globalInstance());
2023-04-09 03:28:39 +00:00
qmlRegisterSingletonInstance("llm", 1, 0, "LLM", LLM::globalInstance());
2023-04-19 01:10:06 +00:00
qmlRegisterSingletonInstance("download", 1, 0, "Download", Download::globalInstance());
qmlRegisterSingletonInstance("network", 1, 0, "Network", Network::globalInstance());
2023-05-18 22:59:10 +00:00
qmlRegisterSingletonInstance("localdocs", 1, 0, "LocalDocs", LocalDocs::globalInstance());
const QUrl url(u"qrc:/gpt4all/main.qml"_qs);
2023-04-09 03:28:39 +00:00
QObject::connect(&engine, &QQmlApplicationEngine::objectCreated,
&app, [url](QObject *obj, const QUrl &objUrl) {
if (!obj && url == objUrl)
QCoreApplication::exit(-1);
}, Qt::QueuedConnection);
engine.load(url);
2023-04-19 01:10:06 +00:00
#if 0
2023-04-09 03:28:39 +00:00
QDirIterator it("qrc:", QDirIterator::Subdirectories);
while (it.hasNext()) {
qDebug() << it.next();
}
#endif
int res = app.exec();
// Make sure ChatLLM threads are joined before global destructors run.
// Otherwise, we can get a heap-use-after-free inside of llama.cpp.
ChatListModel::globalInstance()->destroyChats();
return res;
2023-04-09 03:28:39 +00:00
}