Add debug for chatllm model loading and fix order of getting rid of the dummy chat when no models are restored.
Adam Treat 2023-05-07 11:24:07 -04:00
parent 9bd5609ba0
commit 7b66cb7119
3 changed files with 34 additions and 3 deletions


@@ -231,8 +231,13 @@ void ChatListModel::restoreChat(Chat *chat)
 void ChatListModel::chatsRestoredFinished()
 {
     if (m_dummyChat) {
-        removeChat(m_dummyChat);
+        beginResetModel();
+        Chat *dummy = m_dummyChat;
+        m_dummyChat = nullptr;
+        m_chats.clear();
+        addChat();
+        delete dummy;
+        endResetModel();
     }
     if (m_chats.isEmpty())
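The reordering above is the second half of the commit message: the dummy chat is swapped out inside a beginResetModel()/endResetModel() bracket and only deleted once m_chats no longer references it, rather than going through removeChat() first. A minimal self-contained sketch of that Qt reset pattern follows; ChatListSketch, ChatStub, and swapIn() are illustrative names, not part of this commit.

#include <QAbstractListModel>
#include <QList>

// Sketch only: bulk-replace the backing list of a QAbstractListModel.
struct ChatStub { };

class ChatListSketch : public QAbstractListModel {
public:
    int rowCount(const QModelIndex &) const override { return m_chats.size(); }
    QVariant data(const QModelIndex &, int) const override { return {}; }

    void swapIn(ChatStub *fresh)
    {
        ChatStub *old = m_chats.isEmpty() ? nullptr : m_chats.first();
        beginResetModel();   // attached views discard every cached index
        m_chats.clear();
        m_chats.append(fresh);
        endResetModel();     // views re-query rowCount()/data() from scratch
        delete old;          // the list no longer holds the stale pointer
    }

private:
    QList<ChatStub *> m_chats;
};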


@@ -100,6 +100,13 @@ bool ChatLLM::loadModel(const QString &modelName)
         m_llmodel->loadModel(filePath.toStdString());
     }
     restoreState();
+#if defined(DEBUG)
+    qDebug() << "chatllm modelLoadedChanged" << m_chat->id();
+    fflush(stdout);
+#endif
     emit isModelLoadedChanged();
     if (isFirstLoad)
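Every trace added in this file is guarded by #if defined(DEBUG). Neither qmake nor CMake debug builds define that macro on their own (qmake's debug config defines QT_DEBUG), so it has to be passed explicitly, e.g. -DDEBUG. The fflush(stdout) is presumably there to push out interleaved stdout output from the model backend, since qDebug() itself normally writes to stderr. A hedged sketch of the same guard behind a hypothetical TRACE macro:

#include <QDebug>
#include <cstdio>

// Sketch only: TRACE is illustrative, not part of the project.
#if defined(DEBUG)
#  define TRACE(x) do { qDebug() << x; fflush(stdout); } while (0)
#else
#  define TRACE(x) do { } while (0)
#endif

In ordinary builds TRACE("chatllm loadModel") compiles away entirely.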
@@ -200,6 +207,9 @@ bool ChatLLM::handlePrompt(int32_t token)
 {
     // m_promptResponseTokens and m_responseLogits are related to last prompt/response not
     // the entire context window which we can reset on regenerate prompt
+#if defined(DEBUG)
+    qDebug() << "chatllm prompt process" << m_chat->id() << token;
+#endif
     ++m_promptResponseTokens;
     return !m_stopGenerating;
 }
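handlePrompt() doubles as the backend's per-token callback: it is invoked once per processed token, and returning false asks the model to stop, which is how m_stopGenerating cancels a response mid-stream. A minimal sketch of that return-false-to-cancel contract, with generate() standing in for the real LLModel API:

#include <atomic>
#include <cstdint>
#include <functional>

// Sketch only: the backend calls onToken per token and honors a false return.
void generate(const std::function<bool(int32_t)> &onToken)
{
    for (int32_t tok = 0; tok < 1000; ++tok)   // stand-in for real decoding
        if (!onToken(tok))
            break;                             // caller requested cancellation
}

int main()
{
    std::atomic<bool> stopGenerating{false};
    int count = 0;
    generate([&](int32_t) { ++count; return !stopGenerating.load(); });
}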
@@ -280,6 +290,9 @@ bool ChatLLM::prompt(const QString &prompt, const QString &prompt_template, int3
 void ChatLLM::unloadModel()
 {
+#if defined(DEBUG)
+    qDebug() << "chatllm unloadModel" << m_chat->id();
+#endif
     saveState();
     delete m_llmodel;
     m_llmodel = nullptr;
@@ -288,12 +301,14 @@ void ChatLLM::unloadModel()
 void ChatLLM::reloadModel(const QString &modelName)
 {
+#if defined(DEBUG)
+    qDebug() << "chatllm reloadModel" << m_chat->id();
+#endif
     if (modelName.isEmpty()) {
         loadDefaultModel();
     } else {
         loadModel(modelName);
     }
-    restoreState();
 }
 void ChatLLM::generateName()
@@ -367,6 +382,9 @@ bool ChatLLM::serialize(QDataStream &stream)
     saveState();
     QByteArray compressed = qCompress(m_state);
     stream << compressed;
+#if defined(DEBUG)
+    qDebug() << "chatllm serialize" << m_chat->id() << m_state.size();
+#endif
     return stream.status() == QDataStream::Ok;
 }
@@ -392,6 +410,9 @@ bool ChatLLM::deserialize(QDataStream &stream)
     QByteArray compressed;
     stream >> compressed;
     m_state = qUncompress(compressed);
+#if defined(DEBUG)
+    qDebug() << "chatllm deserialize" << m_chat->id();
+#endif
     return stream.status() == QDataStream::Ok;
 }
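serialize()/deserialize() lean on QDataStream's QByteArray operators, which write a length prefix before the bytes, so the compressed state needs no explicit framing, and qUncompress() simply yields an empty array on corrupt input. A round-trip sketch under the same assumptions; writeState()/readState() are illustrative names:

#include <QByteArray>
#include <QDataStream>

// Sketch only: round-trip a state blob through qCompress/qUncompress.
bool writeState(QDataStream &out, const QByteArray &state)
{
    out << qCompress(state);             // QDataStream adds the length prefix
    return out.status() == QDataStream::Ok;
}

bool readState(QDataStream &in, QByteArray &state)
{
    QByteArray compressed;
    in >> compressed;
    state = qUncompress(compressed);     // empty QByteArray on bad input
    return in.status() == QDataStream::Ok;
}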
@@ -402,6 +423,9 @@ void ChatLLM::saveState()
     const size_t stateSize = m_llmodel->stateSize();
     m_state.resize(stateSize);
+#if defined(DEBUG)
+    qDebug() << "chatllm saveState" << m_chat->id() << "size:" << m_state.size();
+#endif
     m_llmodel->saveState(static_cast<uint8_t*>(reinterpret_cast<void*>(m_state.data())));
 }
@@ -410,5 +434,8 @@ void ChatLLM::restoreState()
     if (!isModelLoaded() || m_state.isEmpty())
         return;
+#if defined(DEBUG)
+    qDebug() << "chatllm restoreState" << m_chat->id() << "size:" << m_state.size();
+#endif
     m_llmodel->restoreState(static_cast<const uint8_t*>(reinterpret_cast<void*>(m_state.data())));
 }
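The double cast in saveState()/restoreState() exists because QByteArray::data() returns char* while the backend's state API takes uint8_t*; routing through void* just spells the re-interpretation out, and a single reinterpret_cast would behave identically. A small helper sketch, with asBytes() as a hypothetical name:

#include <QByteArray>
#include <cstdint>

// Sketch only: make the char* -> uint8_t* re-interpretation explicit.
static inline uint8_t *asBytes(QByteArray &buf)
{
    return reinterpret_cast<uint8_t *>(buf.data());
}

static inline const uint8_t *asBytes(const QByteArray &buf)
{
    return reinterpret_cast<const uint8_t *>(buf.constData());
}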


@@ -60,7 +60,6 @@ struct gptj_buffer {
     }
     ~gptj_buffer() {
         std::cout << "yes we are cleaning up" << std::endl;
-        fflush(stdout);
         delete[] addr;
     }