diff --git a/gpt4all-chat/CHANGELOG.md b/gpt4all-chat/CHANGELOG.md
index ca17361b..2e0ab50a 100644
--- a/gpt4all-chat/CHANGELOG.md
+++ b/gpt4all-chat/CHANGELOG.md
@@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ### Fixed
 - Fix a crash when attempting to continue a chat loaded from disk ([#2995](https://github.com/nomic-ai/gpt4all/pull/2995))
 - Fix the local server rejecting min\_p/top\_p less than 1 ([#2996](https://github.com/nomic-ai/gpt4all/pull/2996))
+- Fix "regenerate" always forgetting the most recent message ([#3011](https://github.com/nomic-ai/gpt4all/pull/3011))
 
 ## [3.3.1] - 2024-09-27 ([v3.3.y](https://github.com/nomic-ai/gpt4all/tree/v3.3.y))
 
diff --git a/gpt4all-chat/src/chat.cpp b/gpt4all-chat/src/chat.cpp
index dd0bf1ec..3a313887 100644
--- a/gpt4all-chat/src/chat.cpp
+++ b/gpt4all-chat/src/chat.cpp
@@ -238,8 +238,9 @@ void Chat::newPromptResponsePair(const QString &prompt)
 {
     resetResponseState();
     m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);
+    // the prompt is passed as the prompt item's value and the response item's prompt
     m_chatModel->appendPrompt("Prompt: ", prompt);
-    m_chatModel->appendResponse("Response: ", QString());
+    m_chatModel->appendResponse("Response: ", prompt);
     emit resetResponseRequested();
 }
 
@@ -248,8 +249,9 @@ void Chat::serverNewPromptResponsePair(const QString &prompt)
 {
     resetResponseState();
     m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);
+    // the prompt is passed as the prompt item's value and the response item's prompt
     m_chatModel->appendPrompt("Prompt: ", prompt);
-    m_chatModel->appendResponse("Response: ", QString());
+    m_chatModel->appendResponse("Response: ", prompt);
 }
 
 bool Chat::restoringFromText() const