chatllm: fix loading of chats after #2676 (#2693)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Jared Van Bortel 2024-07-18 21:03:18 -04:00 committed by GitHub
parent 62abecaec8
commit 56d5a23001
2 changed files with 7 additions and 2 deletions

gpt4all-chat/chatllm.cpp

@@ -37,6 +37,7 @@ using namespace Qt::Literals::StringLiterals;
 //#define DEBUG
 //#define DEBUG_MODEL_LOADING
 
+#define GPTJ_INTERNAL_STATE_VERSION 0 // GPT-J is gone but old chats still use this
 #define LLAMA_INTERNAL_STATE_VERSION 0
 
 class LLModelStore {
@@ -1055,6 +1056,7 @@ bool ChatLLM::serialize(QDataStream &stream, int version, bool serializeKV)
     if (version > 1) {
         stream << m_llModelType;
         switch (m_llModelType) {
+        case GPTJ_: stream << GPTJ_INTERNAL_STATE_VERSION; break;
         case LLAMA_: stream << LLAMA_INTERNAL_STATE_VERSION; break;
         default: Q_UNREACHABLE();
         }

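Background on the fix, for readers of this diff: #2676 removed GPT-J support and deleted GPTJ_ from LLModelType, which renumbered the remaining enumerators and left serialize() without a case for chats that still carry GPT-J state on disk. The sketch below (hypothetical names, not the actual ChatLLM reader) shows the deserialize side that the restored GPTJ_ case keeps in sync with:

    #include <QDataStream>

    // Values pinned to match what older builds wrote to disk.
    enum LLModelType { GPTJ_ = 0, LLAMA_ = 1, API_ = 2, BERT_ = 3 };

    // Hypothetical reader: pulls back the model type and the per-type internal
    // state version that serialize() wrote above. Without a GPTJ_ case, a chat
    // saved by a pre-#2676 build would be rejected here.
    bool readModelTypeHeader(QDataStream &stream, int version)
    {
        if (version > 1) {
            qint32 type, internalVersion;
            stream >> type >> internalVersion;
            switch (type) {
            case GPTJ_:  return internalVersion == 0; // old chats may still carry this tag
            case LLAMA_: return internalVersion == 0;
            default:     return false; // unknown or unsupported type: refuse to load
            }
        }
        return true;
    }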
gpt4all-chat/chatllm.h

@@ -28,9 +28,12 @@ using namespace Qt::Literals::StringLiterals;
 
 class QDataStream;
 
+// NOTE: values serialized to disk, do not change or reuse
 enum LLModelType {
-    LLAMA_,
-    API_,
+    GPTJ_ = 0, // no longer used
+    LLAMA_ = 1,
+    API_ = 2,
+    BERT_ = 3, // no longer used
 };
 
 class ChatLLM;
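The explicit initializers are load-bearing: these values are written verbatim into saved chat files, so GPTJ_ and BERT_ must keep their slots even though the backends are gone. A self-contained, hypothetical round-trip (illustrative only, not repository code) showing what breaks if the compiler is left to renumber:

    #include <QBuffer>
    #include <QDataStream>
    #include <cassert>

    int main()
    {
        // A chat saved by an old build wrote LLAMA_ as the value 1.
        QBuffer buf;
        buf.open(QIODevice::ReadWrite);
        QDataStream out(&buf);
        out << qint32(1);

        // A new build reads it back. If GPTJ_ had simply been deleted and the
        // enum auto-numbered, LLAMA_ would now be 0 and this check would fail.
        buf.seek(0);
        QDataStream in(&buf);
        qint32 type;
        in >> type;
        assert(type == 1); // matches LLAMA_ only because the value is pinned
        return 0;
    }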