mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2024-10-01 01:06:10 -04:00
Bump the version and save up to an order of magnitude of disk space for chat files.
This commit is contained in:
parent
8d2c8c8cb0
commit
e397fda250
@@ -14,7 +14,7 @@ endif()
|
||||
|
||||
set(APP_VERSION_MAJOR 2)
|
||||
set(APP_VERSION_MINOR 4)
|
||||
set(APP_VERSION_PATCH 0)
|
||||
set(APP_VERSION_PATCH 1)
|
||||
set(APP_VERSION "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}")
|
||||
|
||||
# Generate a header file with the version number
|
||||
|
@@ -365,7 +365,8 @@ bool ChatLLM::serialize(QDataStream &stream)
|
||||
stream << quint64(m_ctx.tokens.size());
|
||||
stream.writeRawData(reinterpret_cast<const char*>(m_ctx.tokens.data()), m_ctx.tokens.size() * sizeof(int));
|
||||
saveState();
|
||||
stream << m_state;
|
||||
QByteArray compressed = qCompress(m_state);
|
||||
stream << compressed;
|
||||
return stream.status() == QDataStream::Ok;
|
||||
}
|
||||
|
||||
@@ -388,7 +389,9 @@ bool ChatLLM::deserialize(QDataStream &stream)
|
||||
stream >> tokensSize;
|
||||
m_ctx.tokens.resize(tokensSize);
|
||||
stream.readRawData(reinterpret_cast<char*>(m_ctx.tokens.data()), tokensSize * sizeof(int));
|
||||
stream >> m_state;
|
||||
QByteArray compressed;
|
||||
stream >> compressed;
|
||||
m_state = qUncompress(compressed);
|
||||
return stream.status() == QDataStream::Ok;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user