// gpt4all/gpt4all-chat/main.cpp
// Qt framework headers.
#include <QDirIterator>
#include <QGuiApplication>
#include <QQmlApplicationEngine>
#include <QQmlContext>

// Project headers (application singletons and build configuration).
#include "chatlistmodel.h"
#include "config.h"
#include "download.h"
#include "llm.h"
#include "localdocs.h"
#include "logger.h"
#include "modellist.h"
#include "mysettings.h"
#include "network.h"
#include "../gpt4all-backend/llmodel.h"
int main(int argc, char *argv[])
{
2023-04-18 21:10:06 -04:00
QCoreApplication::setOrganizationName("nomic.ai");
QCoreApplication::setOrganizationDomain("gpt4all.io");
QCoreApplication::setApplicationName("GPT4All");
2023-04-16 19:20:43 -04:00
QCoreApplication::setApplicationVersion(APP_VERSION);
2023-06-01 10:50:42 -04:00
Logger::globalInstance();
2023-04-08 23:28:39 -04:00
QGuiApplication app(argc, argv);
QQmlApplicationEngine engine;
QString llmodelSearchPaths = QCoreApplication::applicationDirPath();
const QString libDir = QCoreApplication::applicationDirPath() + "/../lib/";
if (LLM::directoryExists(libDir))
llmodelSearchPaths += ";" + libDir;
#if defined(Q_OS_MAC)
const QString binDir = QCoreApplication::applicationDirPath() + "/../../../";
if (LLM::directoryExists(binDir))
llmodelSearchPaths += ";" + binDir;
const QString frameworksDir = QCoreApplication::applicationDirPath() + "/../Frameworks/";
if (LLM::directoryExists(frameworksDir))
llmodelSearchPaths += ";" + frameworksDir;
#endif
LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
2023-06-27 11:54:34 -04:00
qmlRegisterSingletonInstance("mysettings", 1, 0, "MySettings", MySettings::globalInstance());
2023-06-22 15:44:49 -04:00
qmlRegisterSingletonInstance("modellist", 1, 0, "ModelList", ModelList::globalInstance());
qmlRegisterSingletonInstance("chatlistmodel", 1, 0, "ChatListModel", ChatListModel::globalInstance());
2023-04-08 23:28:39 -04:00
qmlRegisterSingletonInstance("llm", 1, 0, "LLM", LLM::globalInstance());
2023-04-18 21:10:06 -04:00
qmlRegisterSingletonInstance("download", 1, 0, "Download", Download::globalInstance());
qmlRegisterSingletonInstance("network", 1, 0, "Network", Network::globalInstance());
2023-05-18 18:59:10 -04:00
qmlRegisterSingletonInstance("localdocs", 1, 0, "LocalDocs", LocalDocs::globalInstance());
const QUrl url(u"qrc:/gpt4all/main.qml"_qs);
2023-04-08 23:28:39 -04:00
QObject::connect(&engine, &QQmlApplicationEngine::objectCreated,
&app, [url](QObject *obj, const QUrl &objUrl) {
if (!obj && url == objUrl)
QCoreApplication::exit(-1);
}, Qt::QueuedConnection);
engine.load(url);
2023-04-18 21:10:06 -04:00
#if 0
2023-04-08 23:28:39 -04:00
QDirIterator it("qrc:", QDirIterator::Subdirectories);
while (it.hasNext()) {
qDebug() << it.next();
}
#endif
int res = app.exec();
// Make sure ChatLLM threads are joined before global destructors run.
// Otherwise, we can get a heap-use-after-free inside of llama.cpp.
ChatListModel::globalInstance()->clearChats();
return res;
2023-04-08 23:28:39 -04:00
}