mirror of https://github.com/nomic-ai/gpt4all.git (synced 2024-10-01 01:06:10 -04:00)
Commit f291853e51
Limitations:
1) Context is not restored for gpt-j models.
2) When you switch between different model types in an existing chat, the context and the whole conversation are lost.
3) The settings are not chat- or conversation-specific.
4) The persisted chat files are very large because of how much data the llama.cpp backend tries to persist. We need to investigate how to shrink this (see the sketch below).
41 lines · 827 B · C++
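Limitation 4 stems from llama.cpp serializing the full context state (RNG state, logits, embeddings, and above all the KV cache) whenever a chat is saved. As a rough illustration, assuming llama.cpp's C API of this era (llama_get_state_size is the real entry point; the helper name and the elided model/context setup are hypothetical):

// Hypothetical diagnostic, assuming llama.cpp's C API ("llama.h"):
// prints how many bytes the backend wants to persist for one context.
// Model loading and context creation are elided.
#include <cstdio>

#include "llama.h"

void print_persisted_size(llama_context *ctx)
{
    // Size of the full serializable state: RNG, logits, embeddings, and
    // the KV cache, which dominates and grows with the context window.
    const size_t state_size = llama_get_state_size(ctx);
    std::printf("chat would persist ~%.1f MiB of context state\n",
                state_size / (1024.0 * 1024.0));
}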
#ifndef LLM_H
#define LLM_H

#include <cstdint>

#include <QObject>

#include "chatlistmodel.h"

// Application-wide singleton exposed to QML: owns the list of chats and
// the global inference thread-count setting.
class LLM : public QObject
{
    Q_OBJECT
    Q_PROPERTY(ChatListModel *chatListModel READ chatListModel NOTIFY chatListModelChanged)
    Q_PROPERTY(int32_t threadCount READ threadCount WRITE setThreadCount NOTIFY threadCountChanged)

public:
    static LLM *globalInstance();

    ChatListModel *chatListModel() const { return m_chatListModel; }
    int32_t threadCount() const;
    void setThreadCount(int32_t n_threads);

    Q_INVOKABLE bool checkForUpdates() const;

Q_SIGNALS:
    void chatListModelChanged();
    void threadCountChanged();

private Q_SLOTS:
    void aboutToQuit();

private:
    ChatListModel *m_chatListModel;
    int32_t m_threadCount;

private:
    // Construction is restricted to the befriended MyLLM helper so that
    // instances only ever come from globalInstance().
    explicit LLM();
    ~LLM() {}
    friend class MyLLM;
};

#endif // LLM_H
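The private constructor together with `friend class MyLLM` is a common Qt singleton idiom: a trivially constructible subclass is handed to Q_GLOBAL_STATIC. A minimal sketch of what the matching llm.cpp could look like follows. It is inferred from the header alone, not taken from the project's actual source; the default thread heuristic, the ChatListModel(parent) constructor, and the stub bodies are all assumptions.

// llm.cpp -- hypothetical sketch based only on llm.h above, not the
// project's actual implementation.
#include "llm.h"

#include <QCoreApplication>
#include <QGlobalStatic>
#include <QThread>

#include <algorithm>

// Q_GLOBAL_STATIC needs a publicly constructible type, hence the
// befriended MyLLM subclass declared as a friend in the header.
class MyLLM : public LLM { };
Q_GLOBAL_STATIC(MyLLM, llmInstance)

LLM *LLM::globalInstance()
{
    return llmInstance();
}

LLM::LLM()
    : QObject(nullptr)
    , m_chatListModel(new ChatListModel(this)) // assumed parent-taking ctor
    , m_threadCount(std::max<int32_t>(1, QThread::idealThreadCount() / 2)) // illustrative default
{
    // Give the app a chance to persist state before exit.
    connect(QCoreApplication::instance(), &QCoreApplication::aboutToQuit,
            this, &LLM::aboutToQuit);
}

int32_t LLM::threadCount() const
{
    return m_threadCount;
}

void LLM::setThreadCount(int32_t n_threads)
{
    if (n_threads <= 0)
        n_threads = std::max<int32_t>(1, QThread::idealThreadCount() / 2);
    if (m_threadCount == n_threads)
        return;
    m_threadCount = n_threads;
    Q_EMIT threadCountChanged();
}

void LLM::aboutToQuit()
{
    // Stub for the sketch: serialization of chats would be triggered here
    // (see the commit notes above about large persisted chat files).
}

bool LLM::checkForUpdates() const
{
    // Stub for the sketch; the real update check is omitted here.
    return false;
}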