From a0bd96f75d119e5ce6a5717a8f138937f880677f Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Wed, 6 Mar 2024 16:42:59 -0500
Subject: [PATCH] chat: join ChatLLM threads without calling destructors
 (#2043)

Signed-off-by: Jared Van Bortel
---
 gpt4all-chat/chat.h          | 1 +
 gpt4all-chat/chatlistmodel.h | 9 ++-------
 gpt4all-chat/chatllm.cpp     | 4 ++++
 gpt4all-chat/chatllm.h       | 1 +
 gpt4all-chat/main.cpp        | 2 +-
 5 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/gpt4all-chat/chat.h b/gpt4all-chat/chat.h
index cecbcbda..7edbfba2 100644
--- a/gpt4all-chat/chat.h
+++ b/gpt4all-chat/chat.h
@@ -46,6 +46,7 @@ public:
     explicit Chat(QObject *parent = nullptr);
     explicit Chat(bool isServer, QObject *parent = nullptr);
     virtual ~Chat();
+    void destroy() { m_llmodel->destroy(); }
     void connectLLM();
 
     QString id() const { return m_id; }
diff --git a/gpt4all-chat/chatlistmodel.h b/gpt4all-chat/chatlistmodel.h
index ed04cc7a..fbbb99a3 100644
--- a/gpt4all-chat/chatlistmodel.h
+++ b/gpt4all-chat/chatlistmodel.h
@@ -192,13 +192,8 @@ public:
     int count() const { return m_chats.size(); }
 
-    void clearChats() {
-        m_newChat = nullptr;
-        m_serverChat = nullptr;
-        m_currentChat = nullptr;
-        for (auto * chat: m_chats) { delete chat; }
-        m_chats.clear();
-    }
+    // stop ChatLLM threads for clean shutdown
+    void destroyChats() { for (auto *chat: m_chats) { chat->destroy(); } }
 
     void removeChatFile(Chat *chat) const;
     Q_INVOKABLE void saveChats();
 
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index cc1bebe3..fa2d5539 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -95,6 +95,10 @@ ChatLLM::ChatLLM(Chat *parent, bool isServer)
 
 ChatLLM::~ChatLLM()
 {
+    destroy();
+}
+
+void ChatLLM::destroy() {
     m_stopGenerating = true;
     m_llmThread.quit();
     m_llmThread.wait();
diff --git a/gpt4all-chat/chatllm.h b/gpt4all-chat/chatllm.h
index da848338..f4015d1b 100644
--- a/gpt4all-chat/chatllm.h
+++ b/gpt4all-chat/chatllm.h
@@ -72,6 +72,7 @@ public:
     ChatLLM(Chat *parent, bool isServer = false);
     virtual ~ChatLLM();
+    void destroy();
 
     bool isModelLoaded() const;
     void regenerateResponse();
     void resetResponse();
diff --git a/gpt4all-chat/main.cpp b/gpt4all-chat/main.cpp
index 7debe8be..705278df 100644
--- a/gpt4all-chat/main.cpp
+++ b/gpt4all-chat/main.cpp
@@ -67,7 +67,7 @@ int main(int argc, char *argv[])
 
     // Make sure ChatLLM threads are joined before global destructors run.
     // Otherwise, we can get a heap-use-after-free inside of llama.cpp.
-    ChatListModel::globalInstance()->clearChats();
+    ChatListModel::globalInstance()->destroyChats();
 
     return res;
 }