chatllm: do not pass nullptr as response callback (#2995)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
commit 364d9772e4
parent 50949d304e
@@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ### Added
 - Add bm25 hybrid search to localdocs ([#2969](https://github.com/nomic-ai/gpt4all/pull/2969))
 
+### Fixed
+- Fix a crash when attempting to continue a chat loaded from disk ([#2995](https://github.com/nomic-ai/gpt4all/pull/2995))
+
 ## [3.3.0] - 2024-09-20
 
 ### Added
@@ -1314,8 +1314,16 @@ void ChatLLM::processRestoreStateFromText()
         auto &response = *it++;
         Q_ASSERT(response.first != "Prompt: ");
 
-        m_llModelInfo.model->prompt(prompt.second.toStdString(), promptTemplate.toStdString(), promptFunc, nullptr,
-                                    /*allowContextShift*/ true, m_ctx, false, response.second.toUtf8().constData());
+        // FIXME(jared): this doesn't work well with the "regenerate" button since we are not incrementing
+        // m_promptTokens or m_promptResponseTokens
+        m_llModelInfo.model->prompt(
+            prompt.second.toStdString(), promptTemplate.toStdString(),
+            promptFunc, /*responseFunc*/ [](auto &&...) { return true; },
+            /*allowContextShift*/ true,
+            m_ctx,
+            /*special*/ false,
+            response.second.toUtf8().constData()
+        );
     }
 
     if (!m_stopGenerating) {
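Why this fixes the crash: the response callback is stored in a std::function and invoked while the saved response is replayed. Passing nullptr constructs an *empty* std::function, and invoking an empty std::function throws std::bad_function_call, which surfaced as the crash when continuing a chat loaded from disk. Below is a minimal, self-contained sketch of the failure mode and the fix; ResponseFunc and fakePrompt are hypothetical stand-ins for illustration, not the real LLModel::prompt() API.

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Hypothetical stand-in for the engine's response-callback type.
using ResponseFunc = std::function<bool(int32_t token, const std::string &piece)>;

// Hypothetical stand-in for LLModel::prompt(): invokes the callback once per token.
void fakePrompt(const std::string &reply, const ResponseFunc &responseFunc)
{
    int32_t token = 0;
    for (char c : reply) {
        // If responseFunc was constructed from nullptr it is empty, and this
        // call throws std::bad_function_call -- the analogue of the crash.
        if (!responseFunc(token++, std::string(1, c)))
            break;
    }
}

int main()
{
    try {
        fakePrompt("hi", nullptr); // old behavior: empty std::function
    } catch (const std::bad_function_call &e) {
        std::cerr << "crash analogue: " << e.what() << '\n';
    }

    // The fix: a catch-all no-op callback that always asks the model to
    // continue, mirroring the commit's [](auto &&...) { return true; }.
    fakePrompt("hi", [](auto &&...) { return true; });
    return 0;
}

Note the shape of the fix: a generic variadic lambda such as [](auto &&...) { return true; } is callable with any argument list, so it converts to any std::function whose return type can be initialized from true, and the no-op stays valid even if the callback's parameter list changes.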