chatllm: do not pass nullptr as response callback (#2995)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
Jared Van Bortel 2024-09-26 17:07:01 -04:00 committed by GitHub
parent 50949d304e
commit 364d9772e4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 13 additions and 2 deletions

View File

@@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
### Added
- Add bm25 hybrid search to localdocs ([#2969](https://github.com/nomic-ai/gpt4all/pull/2969))
### Fixed
- Fix a crash when attempting to continue a chat loaded from disk ([#2995](https://github.com/nomic-ai/gpt4all/pull/2995))
## [3.3.0] - 2024-09-20
### Added

View File

@@ -1314,8 +1314,16 @@ void ChatLLM::processRestoreStateFromText()
auto &response = *it++;
Q_ASSERT(response.first != "Prompt: ");
-        m_llModelInfo.model->prompt(prompt.second.toStdString(), promptTemplate.toStdString(), promptFunc, nullptr,
-                                    /*allowContextShift*/ true, m_ctx, false, response.second.toUtf8().constData());
+        // FIXME(jared): this doesn't work well with the "regenerate" button since we are not incrementing
+        // m_promptTokens or m_promptResponseTokens
+        m_llModelInfo.model->prompt(
+            prompt.second.toStdString(), promptTemplate.toStdString(),
+            promptFunc, /*responseFunc*/ [](auto &&...) { return true; },
+            /*allowContextShift*/ true,
+            m_ctx,
+            /*special*/ false,
+            response.second.toUtf8().constData()
+        );
}
if (!m_stopGenerating) {