diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index 578c3b31..b940aa7f 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -210,11 +210,13 @@ qt_add_qml_module(chat
         icons/network.svg
         icons/nomic_logo.svg
         icons/notes.svg
+        icons/plus.svg
         icons/recycle.svg
         icons/regenerate.svg
         icons/search.svg
         icons/send_message.svg
         icons/settings.svg
+        icons/stack.svg
         icons/stop_generating.svg
         icons/thumbs_down.svg
         icons/thumbs_up.svg
diff --git a/gpt4all-chat/chat.cpp b/gpt4all-chat/chat.cpp
index e51126ad..138f2c7c 100644
--- a/gpt4all-chat/chat.cpp
+++ b/gpt4all-chat/chat.cpp
@@ -58,11 +58,13 @@ void Chat::connectLLM()
     connect(m_llmodel, &ChatLLM::modelLoadingPercentageChanged, this, &Chat::handleModelLoadingPercentageChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::responseChanged, this, &Chat::handleResponseChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::promptProcessing, this, &Chat::promptProcessing, Qt::QueuedConnection);
+    connect(m_llmodel, &ChatLLM::generatingQuestions, this, &Chat::generatingQuestions, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::responseStopped, this, &Chat::responseStopped, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::modelLoadingError, this, &Chat::handleModelLoadingError, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::modelLoadingWarning, this, &Chat::modelLoadingWarning, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::recalcChanged, this, &Chat::handleRecalculating, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::generatedNameChanged, this, &Chat::generatedNameChanged, Qt::QueuedConnection);
+    connect(m_llmodel, &ChatLLM::generatedQuestionFinished, this, &Chat::generatedQuestionFinished, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::reportSpeed, this, &Chat::handleTokenSpeedChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::loadedModelInfoChanged, this, &Chat::loadedModelInfoChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::databaseResultsChanged, this, &Chat::handleDatabaseResultsChanged, Qt::QueuedConnection);
@@ -113,6 +115,8 @@ void Chat::resetResponseState()
     if (m_responseInProgress && m_responseState == Chat::LocalDocsRetrieval)
         return;

+    m_generatedQuestions = QList<QString>();
+    emit generatedQuestionsChanged();
     m_tokenSpeed = QString();
     emit tokenSpeedChanged();
     m_responseInProgress = true;
@@ -186,6 +190,12 @@ void Chat::handleModelLoadingPercentageChanged(float loadingPercentage)
 void Chat::promptProcessing()
 {
     m_responseState = !databaseResults().isEmpty() ? Chat::LocalDocsProcessing : Chat::PromptProcessing;
+    emit responseStateChanged();
+}
+
+void Chat::generatingQuestions()
+{
+    m_responseState = Chat::GeneratingQuestions;
     emit responseStateChanged();
 }
@@ -304,6 +314,12 @@ void Chat::generatedNameChanged(const QString &name)
     emit nameChanged();
 }

+void Chat::generatedQuestionFinished(const QString &question)
+{
+    m_generatedQuestions << question;
+    emit generatedQuestionsChanged();
+}
+
 void Chat::handleRecalculating()
 {
     Network::globalInstance()->trackChatEvent("recalc_context", { {"length", m_chatModel->count()} });
diff --git a/gpt4all-chat/chat.h b/gpt4all-chat/chat.h
index 019caf89..c9b95f55 100644
--- a/gpt4all-chat/chat.h
+++ b/gpt4all-chat/chat.h
@@ -39,6 +39,7 @@ class Chat : public QObject
     Q_PROPERTY(LocalDocsCollectionsModel *collectionModel READ collectionModel NOTIFY collectionModelChanged)
     // 0=no, 1=waiting, 2=working
     Q_PROPERTY(int trySwitchContextInProgress READ trySwitchContextInProgress NOTIFY trySwitchContextInProgressChanged)
+    Q_PROPERTY(QList<QString> generatedQuestions READ generatedQuestions NOTIFY generatedQuestionsChanged)
     QML_ELEMENT
     QML_UNCREATABLE("Only creatable from c++!")

@@ -48,6 +49,7 @@ public:
         LocalDocsRetrieval,
         LocalDocsProcessing,
         PromptProcessing,
+        GeneratingQuestions,
         ResponseGeneration
     };
     Q_ENUM(ResponseState)
@@ -119,6 +121,8 @@ public:

     int trySwitchContextInProgress() const { return m_trySwitchContextInProgress; }

+    QList<QString> generatedQuestions() const { return m_generatedQuestions; }
+
 public Q_SLOTS:
     void serverNewPromptResponsePair(const QString &prompt);

@@ -153,13 +157,16 @@ Q_SIGNALS:
     void collectionModelChanged();
     void trySwitchContextInProgressChanged();
     void loadedModelInfoChanged();
+    void generatedQuestionsChanged();

 private Q_SLOTS:
     void handleResponseChanged(const QString &response);
     void handleModelLoadingPercentageChanged(float);
     void promptProcessing();
+    void generatingQuestions();
     void responseStopped(qint64 promptResponseMs);
     void generatedNameChanged(const QString &name);
+    void generatedQuestionFinished(const QString &question);
     void handleRecalculating();
     void handleModelLoadingError(const QString &error);
     void handleTokenSpeedChanged(const QString &tokenSpeed);
@@ -179,6 +186,7 @@ private:
     QString m_fallbackReason;
     QString m_response;
     QList<QString> m_collections;
+    QList<QString> m_generatedQuestions;
     ChatModel *m_chatModel;
     bool m_responseInProgress = false;
     ResponseState m_responseState;
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index dcfc2dd7..611f8595 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -750,7 +750,7 @@ bool ChatLLM::promptInternal(const QList<QString> &collectionList, const QString
     if (!databaseResults.isEmpty()) {
         QStringList results;
         for (const ResultInfo &info : databaseResults)
-            results << u"Collection: %1\nPath: %2\nSnippet: %3"_s.arg(info.collection, info.path, info.text);
+            results << u"Collection: %1\nPath: %2\nExcerpt: %3"_s.arg(info.collection, info.path, info.text);

         // FIXME(jared): use a Jinja prompt template instead of hardcoded Alpaca-style localdocs template
         docsContext = u"### Context:\n%1\n\n"_s.arg(results.join("\n\n"));
@@ -797,7 +797,13 @@ bool ChatLLM::promptInternal(const QList<QString> &collectionList, const QString
         m_response = trimmed;
         emit responseChanged(QString::fromStdString(m_response));
     }
-    emit responseStopped(elapsed);
+
+    SuggestionMode mode = MySettings::globalInstance()->suggestionMode();
+    if (mode == SuggestionMode::On || (!databaseResults.isEmpty() && mode == SuggestionMode::LocalDocsOnly))
+        generateQuestions(elapsed);
+    else
+        emit responseStopped(elapsed);
+
     m_pristineLoadedState = false;
     return true;
 }
@@ -875,13 +881,19 @@ void ChatLLM::generateName()
     if (!isModelLoaded())
         return;

+    const QString chatNamePrompt = MySettings::globalInstance()->modelChatNamePrompt(m_modelInfo);
+    if (chatNamePrompt.trimmed().isEmpty()) {
+        qWarning() << "ChatLLM: not generating chat name because prompt is empty";
+        return;
+    }
+
     auto promptTemplate = MySettings::globalInstance()->modelPromptTemplate(m_modelInfo);
     auto promptFunc = std::bind(&ChatLLM::handleNamePrompt, this, std::placeholders::_1);
     auto responseFunc = std::bind(&ChatLLM::handleNameResponse, this, std::placeholders::_1, std::placeholders::_2);
     auto recalcFunc = std::bind(&ChatLLM::handleNameRecalculate, this, std::placeholders::_1);
     LLModel::PromptContext ctx = m_ctx;
-    m_llModelInfo.model->prompt("Describe the above conversation in seven words or less.",
-        promptTemplate.toStdString(), promptFunc, responseFunc, recalcFunc, ctx);
+    m_llModelInfo.model->prompt(chatNamePrompt.toStdString(), promptTemplate.toStdString(),
+        promptFunc, responseFunc, recalcFunc, ctx);
     std::string trimmed = trim_whitespace(m_nameResponse);
     if (trimmed != m_nameResponse) {
         m_nameResponse = trimmed;
@@ -901,7 +913,6 @@ bool ChatLLM::handleNamePrompt(int32_t token)
     qDebug() << "name prompt" << m_llmThread.objectName() << token;
 #endif
     Q_UNUSED(token);
-    qt_noop();
     return !m_stopGenerating;
 }

@@ -925,10 +936,84 @@ bool ChatLLM::handleNameRecalculate(bool isRecalc)
     qDebug() << "name recalc" << m_llmThread.objectName() << isRecalc;
 #endif
     Q_UNUSED(isRecalc);
-    qt_noop();
     return true;
 }

+bool ChatLLM::handleQuestionPrompt(int32_t token)
+{
+#if defined(DEBUG)
+    qDebug() << "question prompt" << m_llmThread.objectName() << token;
+#endif
+    Q_UNUSED(token);
+    return !m_stopGenerating;
+}
+
+bool ChatLLM::handleQuestionResponse(int32_t token, const std::string &response)
+{
+#if defined(DEBUG)
+    qDebug() << "question response" << m_llmThread.objectName() << token << response;
+#endif
+    Q_UNUSED(token);
+
+    // add token to buffer
+    m_questionResponse.append(QString::fromStdString(response));
+
+    // match whole question sentences
+    static const QRegularExpression reQuestion(R"(\b(What|Where|How|Why|When|Who|Which|Whose|Whom)\b[^?]*\?)");
+
+    // extract all questions from response
+    int lastMatchEnd = -1;
+    for (const auto &match : reQuestion.globalMatch(m_questionResponse)) {
+        lastMatchEnd = match.capturedEnd();
+        emit generatedQuestionFinished(match.captured());
+    }
+
+    // remove processed input from buffer
+    if (lastMatchEnd != -1)
+        m_questionResponse.erase(m_questionResponse.cbegin(), m_questionResponse.cbegin() + lastMatchEnd);
+
+    return true;
+}
+
+bool ChatLLM::handleQuestionRecalculate(bool isRecalc)
+{
+#if defined(DEBUG)
+    qDebug() << "question recalc" << m_llmThread.objectName() << isRecalc;
+#endif
+    Q_UNUSED(isRecalc);
+    return true;
+}
+
+void ChatLLM::generateQuestions(qint64 elapsed)
+{
+    Q_ASSERT(isModelLoaded());
+    if (!isModelLoaded()) {
+        emit responseStopped(elapsed);
+        return;
+    }
+
+    const std::string suggestedFollowUpPrompt = MySettings::globalInstance()->modelSuggestedFollowUpPrompt(m_modelInfo).toStdString();
+    if (QString::fromStdString(suggestedFollowUpPrompt).trimmed().isEmpty()) {
+        emit responseStopped(elapsed);
+        return;
+    }
+
+    emit generatingQuestions();
+    m_questionResponse.clear();
+    auto promptTemplate = MySettings::globalInstance()->modelPromptTemplate(m_modelInfo);
+    auto promptFunc = std::bind(&ChatLLM::handleQuestionPrompt, this, std::placeholders::_1);
+    auto responseFunc = std::bind(&ChatLLM::handleQuestionResponse, this, std::placeholders::_1, std::placeholders::_2);
+    auto recalcFunc = std::bind(&ChatLLM::handleQuestionRecalculate, this, std::placeholders::_1);
+    LLModel::PromptContext ctx = m_ctx;
+    QElapsedTimer totalTime;
+    totalTime.start();
+    m_llModelInfo.model->prompt(suggestedFollowUpPrompt,
+        promptTemplate.toStdString(), promptFunc, responseFunc, recalcFunc, ctx);
+    elapsed += totalTime.elapsed();
+    emit responseStopped(elapsed);
+}
+
 bool ChatLLM::handleSystemPrompt(int32_t token)
 {
 #if defined(DEBUG)
diff --git a/gpt4all-chat/chatllm.h b/gpt4all-chat/chatllm.h
index 01237f84..17405d54 100644
--- a/gpt4all-chat/chatllm.h
+++ b/gpt4all-chat/chatllm.h
@@ -160,6 +160,7 @@ public Q_SLOTS:
     void unloadModel();
     void reloadModel();
     void generateName();
+    void generateQuestions(qint64 elapsed);
     void handleChatIdChanged(const QString &id);
     void handleShouldBeLoadedChanged();
     void handleThreadStarted();
@@ -176,8 +177,10 @@ Q_SIGNALS:
     void modelLoadingWarning(const QString &warning);
     void responseChanged(const QString &response);
     void promptProcessing();
+    void generatingQuestions();
     void responseStopped(qint64 promptResponseMs);
     void generatedNameChanged(const QString &name);
+    void generatedQuestionFinished(const QString &generatedQuestion);
     void stateChanged();
     void threadStarted();
     void shouldBeLoadedChanged();
@@ -206,6 +209,9 @@ protected:
     bool handleRestoreStateFromTextPrompt(int32_t token);
     bool handleRestoreStateFromTextResponse(int32_t token, const std::string &response);
     bool handleRestoreStateFromTextRecalculate(bool isRecalc);
+    bool handleQuestionPrompt(int32_t token);
+    bool handleQuestionResponse(int32_t token, const std::string &response);
+    bool handleQuestionRecalculate(bool isRecalc);
     void saveState();
     void restoreState();

@@ -219,6 +225,7 @@ private:
     std::string m_response;
     std::string m_nameResponse;
+    QString m_questionResponse;
     LLModelInfo m_llModelInfo;
     LLModelType m_llModelType;
     ModelInfo m_modelInfo;
diff --git a/gpt4all-chat/icons/plus.svg b/gpt4all-chat/icons/plus.svg
new file mode 100644
index 00000000..79c378c6
--- /dev/null
+++ b/gpt4all-chat/icons/plus.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/gpt4all-chat/icons/stack.svg b/gpt4all-chat/icons/stack.svg
new file mode 100644
index 00000000..d40c17f6
--- /dev/null
+++ b/gpt4all-chat/icons/stack.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp
index 3312c82d..cfa4667d 100644
--- a/gpt4all-chat/modellist.cpp
+++ b/gpt4all-chat/modellist.cpp
@@ -334,6 +334,28 @@ void ModelInfo::setSystemPrompt(const QString &p)
     m_systemPrompt = p;
 }

+QString ModelInfo::chatNamePrompt() const
+{
+    return MySettings::globalInstance()->modelChatNamePrompt(*this);
+}
+
+void ModelInfo::setChatNamePrompt(const QString &p)
+{
+    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelChatNamePrompt(*this, p, true /*force*/);
+    m_chatNamePrompt = p;
+}
+
+QString ModelInfo::suggestedFollowUpPrompt() const
+{
+    return MySettings::globalInstance()->modelSuggestedFollowUpPrompt(*this);
+}
+
+void ModelInfo::setSuggestedFollowUpPrompt(const QString &p)
+{
+    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelSuggestedFollowUpPrompt(*this, p, true /*force*/);
+    m_suggestedFollowUpPrompt = p;
+}
+
 bool ModelInfo::shouldSaveMetadata() const
 {
     return installed && (isClone() || isDiscovered() || description() == "" /*indicates sideloaded*/);
@@ -364,6 +386,8 @@ QVariantMap ModelInfo::getFields() const
         { "repeatPenaltyTokens", m_repeatPenaltyTokens },
         { "promptTemplate", m_promptTemplate },
         { "systemPrompt", m_systemPrompt },
+        { "chatNamePrompt", m_chatNamePrompt },
+        { "suggestedFollowUpPrompt", m_suggestedFollowUpPrompt },
     };
 }
@@ -758,6 +782,10 @@ QVariant ModelList::dataInternal(const ModelInfo *info, int role) const
         return info->promptTemplate();
     case SystemPromptRole:
         return info->systemPrompt();
+    case ChatNamePromptRole:
+        return info->chatNamePrompt();
+    case SuggestedFollowUpPromptRole:
+        return info->suggestedFollowUpPrompt();
     case LikesRole:
         return info->likes();
     case DownloadsRole:
@@ -928,6 +956,10 @@ void ModelList::updateData(const QString &id, const QVector<QPair<int, QVariant>>
         info->setPromptTemplate(value.toString()); break;
     case SystemPromptRole:
         info->setSystemPrompt(value.toString()); break;
+    case ChatNamePromptRole:
+        info->setChatNamePrompt(value.toString()); break;
+    case SuggestedFollowUpPromptRole:
+        info->setSuggestedFollowUpPrompt(value.toString()); break;
     case LikesRole:
         {
         if (info->likes() != value.toInt()) {
@@ -1077,6 +1109,8 @@ QString ModelList::clone(const ModelInfo &model)
         { ModelList::RepeatPenaltyTokensRole, model.repeatPenaltyTokens() },
         { ModelList::PromptTemplateRole, model.promptTemplate() },
         { ModelList::SystemPromptRole, model.systemPrompt() },
+        { ModelList::ChatNamePromptRole, model.chatNamePrompt() },
+        { ModelList::SuggestedFollowUpPromptRole, model.suggestedFollowUpPrompt() },
     };
     updateData(id, data);
     return id;
@@ -1772,6 +1806,14 @@ void ModelList::updateModelsFromSettings()
             const QString systemPrompt = settings.value(g + "/systemPrompt").toString();
             data.append({ ModelList::SystemPromptRole, systemPrompt });
         }
+        if (settings.contains(g + "/chatNamePrompt")) {
+            const QString chatNamePrompt = settings.value(g + "/chatNamePrompt").toString();
+            data.append({ ModelList::ChatNamePromptRole, chatNamePrompt });
+        }
+        if (settings.contains(g + "/suggestedFollowUpPrompt")) {
+            const QString suggestedFollowUpPrompt = settings.value(g + "/suggestedFollowUpPrompt").toString();
+            data.append({ ModelList::SuggestedFollowUpPromptRole, suggestedFollowUpPrompt });
+        }
         updateData(id, data);
     }
 }
diff --git a/gpt4all-chat/modellist.h b/gpt4all-chat/modellist.h
index 8c81622f..29b26323 100644
--- a/gpt4all-chat/modellist.h
+++ b/gpt4all-chat/modellist.h
@@ -68,6 +68,8 @@ struct ModelInfo {
     Q_PROPERTY(int repeatPenaltyTokens READ repeatPenaltyTokens WRITE setRepeatPenaltyTokens)
     Q_PROPERTY(QString promptTemplate READ promptTemplate WRITE setPromptTemplate)
     Q_PROPERTY(QString systemPrompt READ systemPrompt WRITE setSystemPrompt)
+    Q_PROPERTY(QString chatNamePrompt READ chatNamePrompt WRITE setChatNamePrompt)
+    Q_PROPERTY(QString suggestedFollowUpPrompt READ suggestedFollowUpPrompt WRITE setSuggestedFollowUpPrompt)
     Q_PROPERTY(int likes READ likes WRITE setLikes)
     Q_PROPERTY(int downloads READ downloads WRITE setDownloads)
     Q_PROPERTY(QDateTime recency READ recency WRITE setRecency)
@@ -167,6 +169,10 @@ public:
     void setPromptTemplate(const QString &t);
     QString systemPrompt() const;
    void setSystemPrompt(const QString &p);
+    QString chatNamePrompt() const;
+    void setChatNamePrompt(const QString &p);
+    QString suggestedFollowUpPrompt() const;
+    void setSuggestedFollowUpPrompt(const QString &p);

     bool shouldSaveMetadata() const;

@@ -180,25 +186,27 @@ private:
     QString m_url;
     QString m_quant;
     QString m_type;
-    bool m_isClone = false;
-    bool m_isDiscovered = false;
-    int m_likes = -1;
-    int m_downloads = -1;
+    bool m_isClone = false;
+    bool m_isDiscovered = false;
+    int m_likes = -1;
+    int m_downloads = -1;
     QDateTime m_recency;
-    double m_temperature = 0.7;
-    double m_topP = 0.4;
-    double m_minP = 0.0;
-    int m_topK = 40;
-    int m_maxLength = 4096;
-    int m_promptBatchSize = 128;
-    int m_contextLength = 2048;
-    mutable int m_maxContextLength = -1;
-    int m_gpuLayers = 100;
-    mutable int m_maxGpuLayers = -1;
-    double m_repeatPenalty = 1.18;
-    int m_repeatPenaltyTokens = 64;
-    QString m_promptTemplate = "### Human:\n%1\n\n### Assistant:\n";
-    QString m_systemPrompt = "### System:\nYou are an AI assistant who gives a quality response to whatever humans ask of you.\n\n";
+    double m_temperature = 0.7;
+    double m_topP = 0.4;
+    double m_minP = 0.0;
+    int m_topK = 40;
+    int m_maxLength = 4096;
+    int m_promptBatchSize = 128;
+    int m_contextLength = 2048;
+    mutable int m_maxContextLength = -1;
+    int m_gpuLayers = 100;
+    mutable int m_maxGpuLayers = -1;
+    double m_repeatPenalty = 1.18;
+    int m_repeatPenaltyTokens = 64;
+    QString m_promptTemplate = "### Human:\n%1\n\n### Assistant:\n";
+    QString m_systemPrompt = "### System:\nYou are an AI assistant who gives a quality response to whatever humans ask of you.\n\n";
+    QString m_chatNamePrompt = "Describe the above conversation in seven words or less.";
+    QString m_suggestedFollowUpPrompt = "Suggest three very short factual follow-up questions that have not been answered yet or cannot be found inspired by the previous conversation and excerpts.";

     friend class MySettings;
 };
 Q_DECLARE_METATYPE(ModelInfo)
@@ -317,6 +325,8 @@ public:
         RepeatPenaltyTokensRole,
         PromptTemplateRole,
         SystemPromptRole,
+        ChatNamePromptRole,
+        SuggestedFollowUpPromptRole,
         MinPRole,
         LikesRole,
         DownloadsRole,
@@ -368,6 +378,8 @@ public:
         roles[RepeatPenaltyTokensRole] = "repeatPenaltyTokens";
         roles[PromptTemplateRole] = "promptTemplate";
         roles[SystemPromptRole] = "systemPrompt";
+        roles[ChatNamePromptRole] = "chatNamePrompt";
+        roles[SuggestedFollowUpPromptRole] = "suggestedFollowUpPrompt";
         roles[LikesRole] = "likes";
         roles[DownloadsRole] = "downloads";
         roles[RecencyRole] = "recency";
diff --git a/gpt4all-chat/mysettings.cpp b/gpt4all-chat/mysettings.cpp
index 6ea6ae43..fae98e4a 100644
--- a/gpt4all-chat/mysettings.cpp
+++ b/gpt4all-chat/mysettings.cpp
@@ -41,6 +41,7 @@ static const QVariantMap basicDefaults {
     { "saveChatsContext", false },
     { "serverChat", false },
     { "userDefaultModel", "Application default" },
+    { "suggestionMode", QVariant::fromValue(SuggestionMode::LocalDocsOnly) },
     { "localdocs/chunkSize", 512 },
     { "localdocs/retrievalSize", 3 },
     { "localdocs/showReferences", true },
@@ -136,6 +137,8 @@ void MySettings::restoreModelDefaults(const ModelInfo &info)
     setModelRepeatPenaltyTokens(info, info.m_repeatPenaltyTokens);
     setModelPromptTemplate(info, info.m_promptTemplate);
     setModelSystemPrompt(info, info.m_systemPrompt);
+    setModelChatNamePrompt(info, info.m_chatNamePrompt);
+    setModelSuggestedFollowUpPrompt(info, info.m_suggestedFollowUpPrompt);
 }

 void MySettings::restoreApplicationDefaults()
@@ -150,6 +153,7 @@ void MySettings::restoreApplicationDefaults()
     setModelPath(defaultLocalModelsPath());
     setUserDefaultModel(basicDefaults.value("userDefaultModel").toString());
     setForceMetal(defaults::forceMetal);
+    setSuggestionMode(basicDefaults.value("suggestionMode").value<SuggestionMode>());
 }

 void MySettings::restoreLocalDocsDefaults()
@@ -212,28 +216,30 @@ void MySettings::setModelSetting(const QString &name, const ModelInfo &info, con
         QMetaObject::invokeMethod(this, u"%1Changed"_s.arg(name).toLatin1().constData(), Q_ARG(ModelInfo, info));
 }

-QString MySettings::modelFilename (const ModelInfo &info) const { return getModelSetting("filename", info).toString(); }
-QString MySettings::modelDescription (const ModelInfo &info) const { return getModelSetting("description", info).toString(); }
-QString MySettings::modelUrl (const ModelInfo &info) const { return getModelSetting("url", info).toString(); }
-QString MySettings::modelQuant (const ModelInfo &info) const { return getModelSetting("quant", info).toString(); }
-QString MySettings::modelType (const ModelInfo &info) const { return getModelSetting("type", info).toString(); }
-bool MySettings::modelIsClone (const ModelInfo &info) const { return getModelSetting("isClone", info).toBool(); }
-bool MySettings::modelIsDiscovered (const ModelInfo &info) const { return getModelSetting("isDiscovered", info).toBool(); }
-int MySettings::modelLikes (const ModelInfo &info) const { return getModelSetting("likes", info).toInt(); }
-int MySettings::modelDownloads (const ModelInfo &info) const { return getModelSetting("downloads", info).toInt(); }
-QDateTime MySettings::modelRecency (const ModelInfo &info) const { return getModelSetting("recency", info).toDateTime(); }
-double MySettings::modelTemperature (const ModelInfo &info) const { return getModelSetting("temperature", info).toDouble(); }
-double MySettings::modelTopP (const ModelInfo &info) const { return getModelSetting("topP", info).toDouble(); }
-double MySettings::modelMinP (const ModelInfo &info) const { return getModelSetting("minP", info).toDouble(); }
-int MySettings::modelTopK (const ModelInfo &info) const { return getModelSetting("topK", info).toInt(); }
-int MySettings::modelMaxLength (const ModelInfo &info) const { return getModelSetting("maxLength", info).toInt(); }
-int MySettings::modelPromptBatchSize (const ModelInfo &info) const { return getModelSetting("promptBatchSize", info).toInt(); }
-int MySettings::modelContextLength (const ModelInfo &info) const { return getModelSetting("contextLength", info).toInt(); }
-int MySettings::modelGpuLayers (const ModelInfo &info) const { return getModelSetting("gpuLayers", info).toInt(); }
-double MySettings::modelRepeatPenalty (const ModelInfo &info) const { return getModelSetting("repeatPenalty", info).toDouble(); }
-int MySettings::modelRepeatPenaltyTokens(const ModelInfo &info) const { return getModelSetting("repeatPenaltyTokens", info).toInt(); }
-QString MySettings::modelPromptTemplate (const ModelInfo &info) const { return getModelSetting("promptTemplate", info).toString(); }
-QString MySettings::modelSystemPrompt (const ModelInfo &info) const { return getModelSetting("systemPrompt", info).toString(); }
+QString MySettings::modelFilename (const ModelInfo &info) const { return getModelSetting("filename", info).toString(); }
+QString MySettings::modelDescription (const ModelInfo &info) const { return getModelSetting("description", info).toString(); }
+QString MySettings::modelUrl (const ModelInfo &info) const { return getModelSetting("url", info).toString(); }
+QString MySettings::modelQuant (const ModelInfo &info) const { return getModelSetting("quant", info).toString(); }
+QString MySettings::modelType (const ModelInfo &info) const { return getModelSetting("type", info).toString(); }
+bool MySettings::modelIsClone (const ModelInfo &info) const { return getModelSetting("isClone", info).toBool(); }
+bool MySettings::modelIsDiscovered (const ModelInfo &info) const { return getModelSetting("isDiscovered", info).toBool(); }
+int MySettings::modelLikes (const ModelInfo &info) const { return getModelSetting("likes", info).toInt(); }
+int MySettings::modelDownloads (const ModelInfo &info) const { return getModelSetting("downloads", info).toInt(); }
+QDateTime MySettings::modelRecency (const ModelInfo &info) const { return getModelSetting("recency", info).toDateTime(); }
+double MySettings::modelTemperature (const ModelInfo &info) const { return getModelSetting("temperature", info).toDouble(); }
+double MySettings::modelTopP (const ModelInfo &info) const { return getModelSetting("topP", info).toDouble(); }
+double MySettings::modelMinP (const ModelInfo &info) const { return getModelSetting("minP", info).toDouble(); }
+int MySettings::modelTopK (const ModelInfo &info) const { return getModelSetting("topK", info).toInt(); }
+int MySettings::modelMaxLength (const ModelInfo &info) const { return getModelSetting("maxLength", info).toInt(); }
+int MySettings::modelPromptBatchSize (const ModelInfo &info) const { return getModelSetting("promptBatchSize", info).toInt(); }
+int MySettings::modelContextLength (const ModelInfo &info) const { return getModelSetting("contextLength", info).toInt(); }
+int MySettings::modelGpuLayers (const ModelInfo &info) const { return getModelSetting("gpuLayers", info).toInt(); }
+double MySettings::modelRepeatPenalty (const ModelInfo &info) const { return getModelSetting("repeatPenalty", info).toDouble(); }
+int MySettings::modelRepeatPenaltyTokens (const ModelInfo &info) const { return getModelSetting("repeatPenaltyTokens", info).toInt(); }
+QString MySettings::modelPromptTemplate (const ModelInfo &info) const { return getModelSetting("promptTemplate", info).toString(); }
+QString MySettings::modelSystemPrompt (const ModelInfo &info) const { return getModelSetting("systemPrompt", info).toString(); }
+QString MySettings::modelChatNamePrompt (const ModelInfo &info) const { return getModelSetting("chatNamePrompt", info).toString(); }
+QString MySettings::modelSuggestedFollowUpPrompt(const ModelInfo &info) const { return getModelSetting("suggestedFollowUpPrompt", info).toString(); }

 void MySettings::setModelFilename(const ModelInfo &info, const QString &value, bool force)
 {
@@ -345,6 +351,16 @@ void MySettings::setModelSystemPrompt(const ModelInfo &info, const QString &valu
     setModelSetting("systemPrompt", info, value, force, true);
 }

+void MySettings::setModelChatNamePrompt(const ModelInfo &info, const QString &value, bool force)
+{
+    setModelSetting("chatNamePrompt", info, value, force, true);
+}
+
+void MySettings::setModelSuggestedFollowUpPrompt(const ModelInfo &info, const QString &value, bool force)
+{
+    setModelSetting("suggestedFollowUpPrompt", info, value, force, true);
+}
+
 int MySettings::threadCount() const
 {
     int c = m_settings.value("threadCount", defaults::threadCount).toInt();
@@ -368,21 +384,22 @@ void MySettings::setThreadCount(int value)
     emit threadCountChanged();
 }

-bool MySettings::saveChatsContext() const { return getBasicSetting("saveChatsContext" ).toBool(); }
-bool MySettings::serverChat() const { return getBasicSetting("serverChat" ).toBool(); }
-int MySettings::networkPort() const { return getBasicSetting("networkPort" ).toInt(); }
-QString MySettings::userDefaultModel() const { return getBasicSetting("userDefaultModel" ).toString(); }
-QString MySettings::chatTheme() const { return getBasicSetting("chatTheme" ).toString(); }
-QString MySettings::fontSize() const { return getBasicSetting("fontSize" ).toString(); }
-QString MySettings::lastVersionStarted() const { return getBasicSetting("lastVersionStarted" ).toString(); }
-int MySettings::localDocsChunkSize() const { return getBasicSetting("localdocs/chunkSize" ).toInt(); }
-int MySettings::localDocsRetrievalSize() const { return getBasicSetting("localdocs/retrievalSize" ).toInt(); }
-bool MySettings::localDocsShowReferences() const { return getBasicSetting("localdocs/showReferences").toBool(); }
-QStringList MySettings::localDocsFileExtensions() const { return getBasicSetting("localdocs/fileExtensions").toStringList(); }
-bool MySettings::localDocsUseRemoteEmbed() const { return getBasicSetting("localdocs/useRemoteEmbed").toBool(); }
-QString MySettings::localDocsNomicAPIKey() const { return getBasicSetting("localdocs/nomicAPIKey" ).toString(); }
-QString MySettings::localDocsEmbedDevice() const { return getBasicSetting("localdocs/embedDevice" ).toString(); }
-QString MySettings::networkAttribution() const { return getBasicSetting("network/attribution" ).toString(); }
+bool MySettings::saveChatsContext() const { return getBasicSetting("saveChatsContext" ).toBool(); }
+bool MySettings::serverChat() const { return getBasicSetting("serverChat" ).toBool(); }
+int MySettings::networkPort() const { return getBasicSetting("networkPort" ).toInt(); }
+QString MySettings::userDefaultModel() const { return getBasicSetting("userDefaultModel" ).toString(); }
+QString MySettings::chatTheme() const { return getBasicSetting("chatTheme" ).toString(); }
+QString MySettings::fontSize() const { return getBasicSetting("fontSize" ).toString(); }
+QString MySettings::lastVersionStarted() const { return getBasicSetting("lastVersionStarted" ).toString(); }
+int MySettings::localDocsChunkSize() const { return getBasicSetting("localdocs/chunkSize" ).toInt(); }
+int MySettings::localDocsRetrievalSize() const { return getBasicSetting("localdocs/retrievalSize" ).toInt(); }
+bool MySettings::localDocsShowReferences() const { return getBasicSetting("localdocs/showReferences").toBool(); }
+QStringList MySettings::localDocsFileExtensions() const { return getBasicSetting("localdocs/fileExtensions").toStringList(); }
+bool MySettings::localDocsUseRemoteEmbed() const { return getBasicSetting("localdocs/useRemoteEmbed").toBool(); }
+QString MySettings::localDocsNomicAPIKey() const { return getBasicSetting("localdocs/nomicAPIKey" ).toString(); }
+QString MySettings::localDocsEmbedDevice() const { return getBasicSetting("localdocs/embedDevice" ).toString(); }
+QString MySettings::networkAttribution() const { return getBasicSetting("network/attribution" ).toString(); }
+SuggestionMode MySettings::suggestionMode() const { return getBasicSetting("suggestionMode").value<SuggestionMode>(); }

 void MySettings::setSaveChatsContext(bool value) { setBasicSetting("saveChatsContext", value); }
 void MySettings::setServerChat(bool value) { setBasicSetting("serverChat", value); }
@@ -399,6 +416,7 @@ void MySettings::setLocalDocsUseRemoteEmbed(bool value) { setBasic
 void MySettings::setLocalDocsNomicAPIKey(const QString &value) { setBasicSetting("localdocs/nomicAPIKey", value, "localDocsNomicAPIKey"); }
 void MySettings::setLocalDocsEmbedDevice(const QString &value) { setBasicSetting("localdocs/embedDevice", value, "localDocsEmbedDevice"); }
 void MySettings::setNetworkAttribution(const QString &value) { setBasicSetting("network/attribution", value, "networkAttribution"); }
+void MySettings::setSuggestionMode(SuggestionMode value) { setBasicSetting("suggestionMode", int(value)); }

 QString MySettings::modelPath()
 {
diff --git a/gpt4all-chat/mysettings.h b/gpt4all-chat/mysettings.h
index 0fbc9033..a6f8c59b 100644
--- a/gpt4all-chat/mysettings.h
+++ b/gpt4all-chat/mysettings.h
@@ -13,6 +13,18 @@
 #include
 #include

+namespace MySettingsEnums {
+    Q_NAMESPACE
+
+    enum class SuggestionMode {
+        LocalDocsOnly = 0,
+        On = 1,
+        Off = 2,
+    };
+    Q_ENUM_NS(SuggestionMode)
+}
+using namespace MySettingsEnums;
+
 class MySettings : public QObject
 {
     Q_OBJECT
@@ -39,6 +51,7 @@ class MySettings : public QObject
     Q_PROPERTY(QStringList deviceList MEMBER m_deviceList CONSTANT)
     Q_PROPERTY(QStringList embeddingsDeviceList MEMBER m_embeddingsDeviceList CONSTANT)
     Q_PROPERTY(int networkPort READ networkPort WRITE setNetworkPort NOTIFY networkPortChanged)
+    Q_PROPERTY(SuggestionMode suggestionMode READ suggestionMode WRITE setSuggestionMode NOTIFY suggestionModeChanged)

 public:
     static MySettings *globalInstance();
@@ -98,6 +111,10 @@ public:
     Q_INVOKABLE void setModelContextLength(const ModelInfo &info, int value, bool force = false);
     int modelGpuLayers(const ModelInfo &info) const;
     Q_INVOKABLE void setModelGpuLayers(const ModelInfo &info, int value, bool force = false);
+    QString modelChatNamePrompt(const ModelInfo &info) const;
+    Q_INVOKABLE void setModelChatNamePrompt(const ModelInfo &info, const QString &value, bool force = false);
+    QString modelSuggestedFollowUpPrompt(const ModelInfo &info) const;
+    Q_INVOKABLE void setModelSuggestedFollowUpPrompt(const ModelInfo &info, const QString &value, bool force = false);

     // Application settings
     int threadCount() const;
@@ -122,6 +139,8 @@ public:
     void setContextLength(int32_t value);
     int32_t gpuLayers() const;
     void setGpuLayers(int32_t value);
+    SuggestionMode suggestionMode() const;
+    void setSuggestionMode(SuggestionMode mode);

     // Release/Download settings
     QString lastVersionStarted() const;
@@ -171,6 +190,8 @@ Q_SIGNALS:
     void repeatPenaltyTokensChanged(const ModelInfo &info);
     void promptTemplateChanged(const ModelInfo &info);
     void systemPromptChanged(const ModelInfo &info);
+    void chatNamePromptChanged(const ModelInfo &info);
+    void suggestedFollowUpPromptChanged(const ModelInfo &info);
     void threadCountChanged();
     void saveChatsContextChanged();
     void serverChatChanged();
@@ -193,6 +214,7 @@ Q_SIGNALS:
     void networkUsageStatsActiveChanged();
     void attemptModelLoadChanged();
     void deviceChanged();
+    void suggestionModeChanged();

 private:
     QSettings m_settings;
diff --git a/gpt4all-chat/qml/ApplicationSettings.qml b/gpt4all-chat/qml/ApplicationSettings.qml
index 2d302ce1..e20e8628 100644
--- a/gpt4all-chat/qml/ApplicationSettings.qml
+++ b/gpt4all-chat/qml/ApplicationSettings.qml
@@ -227,16 +227,40 @@ MySettingsTab {
                 MySettings.userDefaultModel = comboBox.currentText
             }
         }
+        MySettingsLabel {
+            id: suggestionModeLabel
+            text: qsTr("Suggestion Mode")
+            helpText: qsTr("Generate suggested follow-up questions at the end of responses.")
+            Layout.row: 6
+            Layout.column: 0
+        }
+        MyComboBox {
+            id: suggestionModeBox
+            Layout.row: 6
+            Layout.column: 2
+            Layout.minimumWidth: 400
+            Layout.maximumWidth: 400
+            Layout.alignment: Qt.AlignRight
+            model: [ qsTr("When chatting with LocalDocs"), qsTr("Whenever possible"), qsTr("Never") ]
+            Accessible.name: suggestionModeLabel.text
+            Accessible.description: suggestionModeLabel.helpText
+            onActivated: {
+                MySettings.suggestionMode = suggestionModeBox.currentIndex;
+            }
+            Component.onCompleted: {
+                suggestionModeBox.currentIndex = MySettings.suggestionMode;
+            }
+        }
         MySettingsLabel {
             id: modelPathLabel
             text: qsTr("Download Path")
             helpText: qsTr("Where to store local models and the LocalDocs database.")
-            Layout.row: 6
+            Layout.row: 7
             Layout.column: 0
         }
         RowLayout {
-            Layout.row: 6
+            Layout.row: 7
             Layout.column: 2
             Layout.alignment: Qt.AlignRight
             Layout.minimumWidth: 400
@@ -273,12 +297,12 @@ MySettingsTab {
             id: dataLakeLabel
             text: qsTr("Enable Datalake")
             helpText: qsTr("Send chats and feedback to the GPT4All Open-Source Datalake.")
-            Layout.row: 7
+            Layout.row: 8
             Layout.column: 0
         }
         MyCheckBox {
             id: dataLakeBox
-            Layout.row: 7
+            Layout.row: 8
             Layout.column: 2
             Layout.alignment: Qt.AlignRight
             Component.onCompleted: { dataLakeBox.checked = MySettings.networkIsActive; }
@@ -296,7 +320,7 @@ MySettingsTab {
         }

         ColumnLayout {
-            Layout.row: 8
+            Layout.row: 9
             Layout.column: 0
             Layout.columnSpan: 3
             Layout.fillWidth: true
@@ -319,7 +343,7 @@ MySettingsTab {
             id: nThreadsLabel
             text: qsTr("CPU Threads")
             helpText: qsTr("The number of CPU threads used for inference and embedding.")
-            Layout.row: 9
+            Layout.row: 10
             Layout.column: 0
         }
         MyTextField {
@@ -327,7 +351,7 @@
             color: theme.textColor
             font.pixelSize: theme.fontSizeLarge
             Layout.alignment: Qt.AlignRight
-            Layout.row: 9
+            Layout.row: 10
             Layout.column: 2
             Layout.minimumWidth: 200
             Layout.maximumWidth: 200
@@ -351,12 +375,12 @@ MySettingsTab {
             id: saveChatsContextLabel
             text: qsTr("Save Chat Context")
             helpText: qsTr("Save the chat model's state to disk for faster loading. WARNING: Uses ~2GB per chat.")
-            Layout.row: 10
+            Layout.row: 11
             Layout.column: 0
         }
         MyCheckBox {
             id: saveChatsContextBox
-            Layout.row: 10
+            Layout.row: 11
             Layout.column: 2
             Layout.alignment: Qt.AlignRight
             checked: MySettings.saveChatsContext
@@ -368,12 +392,12 @@ MySettingsTab {
             id: serverChatLabel
             text: qsTr("Enable Local Server")
             helpText: qsTr("Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage.")
-            Layout.row: 11
+            Layout.row: 12
             Layout.column: 0
         }
         MyCheckBox {
             id: serverChatBox
-            Layout.row: 11
+            Layout.row: 12
             Layout.column: 2
             Layout.alignment: Qt.AlignRight
             checked: MySettings.serverChat
@@ -385,7 +409,7 @@ MySettingsTab {
             id: serverPortLabel
             text: qsTr("API Server Port")
             helpText: qsTr("The port to use for the local server. Requires restart.")
-            Layout.row: 12
+            Layout.row: 13
             Layout.column: 0
         }
         MyTextField {
@@ -393,7 +417,7 @@
             text: MySettings.networkPort
             color: theme.textColor
             font.pixelSize: theme.fontSizeLarge
-            Layout.row: 12
+            Layout.row: 13
             Layout.column: 2
             Layout.minimumWidth: 200
             Layout.maximumWidth: 200
diff --git a/gpt4all-chat/qml/ChatView.qml b/gpt4all-chat/qml/ChatView.qml
index fc5188a9..124bf32c 100644
--- a/gpt4all-chat/qml/ChatView.qml
+++ b/gpt4all-chat/qml/ChatView.qml
@@ -797,7 +797,7 @@ Rectangle {

         delegate: GridLayout {
             width: listView.contentItem.width - 15
-            rows: 3
+            rows: 5
             columns: 2

             Item {
@@ -850,6 +850,8 @@ Rectangle {
                 font.pixelSize: theme.fontSizeLarger
                 font.bold: true
                 color: theme.conversationHeader
+                enabled: false
+                focus: false
                 readOnly: true
             }
             Text {
@@ -872,6 +874,7 @@ Rectangle {
                     case Chat.LocalDocsProcessing: return qsTr("searching localdocs: ") + currentChat.collectionList.join(", ") + " ...";
                     case Chat.PromptProcessing: return qsTr("processing ...")
                     case Chat.ResponseGeneration: return qsTr("generating response ...");
+                    case Chat.GeneratingQuestions: return qsTr("generating questions ...");
                     default: return ""; // handle unexpected values
                 }
             }
@@ -1094,7 +1097,16 @@ Rectangle {
                 Layout.alignment: Qt.AlignVCenter
                 Layout.preferredWidth: childrenRect.width
                 Layout.preferredHeight: childrenRect.height
-                visible: consolidatedSources.length !== 0 && MySettings.localDocsShowReferences && (!currentResponse || !currentChat.responseInProgress)
+                visible: {
+                    if (consolidatedSources.length === 0)
+                        return false
+                    if (!MySettings.localDocsShowReferences)
+                        return false
+                    if (currentResponse && currentChat.responseInProgress
+                        && currentChat.responseState !== Chat.GeneratingQuestions)
+                        return false
+                    return true
+                }

                 MyButton {
                     backgroundColor: theme.sourcesBackground
@@ -1171,7 +1183,16 @@ Rectangle {
                 Layout.row: 3
                 Layout.column: 1
                 Layout.topMargin: 5
-                visible: consolidatedSources.length !== 0 && MySettings.localDocsShowReferences && (!currentResponse || !currentChat.responseInProgress)
+                visible: {
+                    if (consolidatedSources.length === 0)
+                        return false
+                    if (!MySettings.localDocsShowReferences)
+                        return false
+                    if (currentResponse && currentChat.responseInProgress
+                        && currentChat.responseState !== Chat.GeneratingQuestions)
+                        return false
+                    return true
+                }
                 clip: true
                 Layout.fillWidth: true
                 Layout.preferredHeight: 0
@@ -1310,45 +1331,250 @@
                     }
                 }
             }
-            }

             property bool shouldAutoScroll: true
             property bool isAutoScrolling: false

+            function shouldShowSuggestions() {
+                if (!currentResponse)
+                    return false;
+                if (MySettings.suggestionMode === 2) // Off
+                    return false;
+                if (MySettings.suggestionMode === 0 && consolidatedSources.length === 0) // LocalDocs only
+                    return false;
+                return currentChat.responseState === Chat.GeneratingQuestions || currentChat.generatedQuestions.length !== 0;
+            }

-        Connections {
-            target: currentChat
-            function onResponseChanged() {
-                listView.scrollToEnd()
+            Item {
+                visible: shouldShowSuggestions()
+                Layout.row: 4
+                Layout.column: 0
+                Layout.topMargin: 20
+                Layout.alignment: Qt.AlignVCenter | Qt.AlignRight
+                Layout.preferredWidth: 28
+                Layout.preferredHeight: 28
+                Image {
+                    id: stack
+                    sourceSize: Qt.size(28, 28)
+                    fillMode: Image.PreserveAspectFit
+                    mipmap: true
+                    visible: false
+                    source: "qrc:/gpt4all/icons/stack.svg"
+                }
+
+                ColorOverlay {
+                    anchors.fill: stack
+                    source: stack
+                    color: theme.conversationHeader
+                }
+            }
+
+            Item {
+                visible: shouldShowSuggestions()
+                Layout.row: 4
+                Layout.column: 1
+                Layout.topMargin: 20
+                Layout.fillWidth: true
+                Layout.preferredHeight: 38
+                RowLayout {
+                    spacing: 5
+                    anchors.left: parent.left
+                    anchors.top: parent.top
+                    anchors.bottom: parent.bottom
+
+                    TextArea {
+                        text: qsTr("Suggested follow-ups")
+                        padding: 0
+                        font.pixelSize: theme.fontSizeLarger
+                        font.bold: true
+                        color: theme.conversationHeader
+                        enabled: false
+                        focus: false
+                        readOnly: true
+                    }
+                }
+            }
+
+            ColumnLayout {
+                visible: shouldShowSuggestions()
+                Layout.row: 5
+                Layout.column: 1
+                Layout.fillWidth: true
+                Layout.minimumHeight: 1
+                spacing: 10
+                Repeater {
+                    model: currentChat.generatedQuestions
+                    TextArea {
+                        id: followUpText
+                        Layout.fillWidth: true
+                        Layout.alignment: Qt.AlignLeft
+                        rightPadding: 40
+                        topPadding: 10
+                        leftPadding: 20
+                        bottomPadding: 10
+                        text: modelData
+                        focus: false
+                        readOnly: true
+                        wrapMode: Text.WordWrap
+                        hoverEnabled: !currentChat.responseInProgress
+                        color: theme.textColor
+                        font.pixelSize: theme.fontSizeLarge
+                        background: Rectangle {
+                            color: hovered ? theme.sourcesBackgroundHovered : theme.sourcesBackground
+                            radius: 10
+                        }
+                        MouseArea {
+                            id: maFollowUp
+                            anchors.fill: parent
+                            enabled: !currentChat.responseInProgress
+                            onClicked: function() {
+                                var chat = window.currentChat
+                                var followup = modelData
+                                chat.stopGenerating()
+                                chat.newPromptResponsePair(followup);
+                                chat.prompt(followup,
+                                            MySettings.promptTemplate,
+                                            MySettings.maxLength,
+                                            MySettings.topK,
+                                            MySettings.topP,
+                                            MySettings.minP,
+                                            MySettings.temperature,
+                                            MySettings.promptBatchSize,
+                                            MySettings.repeatPenalty,
+                                            MySettings.repeatPenaltyTokens)
+                            }
+                        }
+                        Item {
+                            anchors.right: parent.right
+                            anchors.verticalCenter: parent.verticalCenter
+                            width: 40
+                            height: 40
+                            visible: !currentChat.responseInProgress
+                            Image {
+                                id: plusImage
+                                anchors.verticalCenter: parent.verticalCenter
+                                sourceSize.width: 20
+                                sourceSize.height: 20
+                                mipmap: true
+                                visible: false
+                                source: "qrc:/gpt4all/icons/plus.svg"
+                            }
+
+                            ColorOverlay {
+                                anchors.fill: plusImage
+                                source: plusImage
+                                color: theme.styledTextColor
+                            }
+                        }
+                    }
+                }
+
+                Rectangle {
+                    Layout.fillWidth: true
+                    color: "transparent"
+                    radius: 10
+                    Layout.preferredHeight: currentChat.responseInProgress ? 40 : 0
+                    clip: true
+                    ColumnLayout {
+                        id: followUpLayout
+                        anchors.fill: parent
+                        Rectangle {
+                            id: myRect1
+                            Layout.preferredWidth: 0
+                            Layout.minimumWidth: 0
+                            Layout.maximumWidth: parent.width
+                            height: 12
+                            color: theme.sourcesBackgroundHovered
+                        }
+
+                        Rectangle {
+                            id: myRect2
+                            Layout.preferredWidth: 0
+                            Layout.minimumWidth: 0
+                            Layout.maximumWidth: parent.width
+                            height: 12
+                            color: theme.sourcesBackgroundHovered
+                        }
+
+                        SequentialAnimation {
+                            id: followUpProgressAnimation
+                            ParallelAnimation {
+                                PropertyAnimation {
+                                    target: myRect1
+                                    property: "Layout.preferredWidth"
+                                    from: 0
+                                    to: followUpLayout.width
+                                    duration: 1000
+                                }
+                                PropertyAnimation {
+                                    target: myRect2
+                                    property: "Layout.preferredWidth"
+                                    from: 0
+                                    to: followUpLayout.width / 2
+                                    duration: 1000
+                                }
+                            }
+                            SequentialAnimation {
+                                loops: Animation.Infinite
+                                ParallelAnimation {
+                                    PropertyAnimation {
+                                        target: myRect1
+                                        property: "opacity"
+                                        from: 1
+                                        to: 0.2
+                                        duration: 1500
+                                    }
+                                    PropertyAnimation {
+                                        target: myRect2
+                                        property: "opacity"
+                                        from: 1
+                                        to: 0.2
+                                        duration: 1500
+                                    }
+                                }
+                                ParallelAnimation {
+                                    PropertyAnimation {
+                                        target: myRect1
+                                        property: "opacity"
+                                        from: 0.2
+                                        to: 1
+                                        duration: 1500
+                                    }
+                                    PropertyAnimation {
+                                        target: myRect2
+                                        property: "opacity"
+                                        from: 0.2
+                                        to: 1
+                                        duration: 1500
+                                    }
+                                }
+                            }
+                        }
+
+                        onVisibleChanged: {
+                            if (visible)
+                                followUpProgressAnimation.start();
+                        }
+                    }
+
+                    Behavior on Layout.preferredHeight {
+                        NumberAnimation {
+                            duration: 300
+                            easing.type: Easing.InOutQuad
+                        }
+                    }
+                }
+            }
         }
     }

         function scrollToEnd() {
-            if (listView.shouldAutoScroll) {
-                listView.isAutoScrolling = true
-                listView.positionViewAtEnd()
-                listView.isAutoScrolling = false
+            listView.positionViewAtEnd()
         }

-        onContentYChanged: {
-            if (!isAutoScrolling)
-                shouldAutoScroll = atYEnd
-        }
-
-        Component.onCompleted: {
-            shouldAutoScroll = true
-            positionViewAtEnd()
-        }
-
-        footer: Item {
-            id: bottomPadding
-            width: parent.width
-            height: 0
+        onContentHeightChanged: {
+            if (atYEnd)
+                scrollToEnd()
         }
     }
 }
 }
-    }

 Rectangle {
diff --git a/gpt4all-chat/qml/ModelSettings.qml b/gpt4all-chat/qml/ModelSettings.qml
index ad3789b4..dba06b2d 100644
--- a/gpt4all-chat/qml/ModelSettings.qml
+++ b/gpt4all-chat/qml/ModelSettings.qml
@@ -250,45 +250,64 @@ MySettingsTab {
             }
         }

+        MySettingsLabel {
+            id: chatNamePromptLabel
+            text: qsTr("Chat Name Prompt")
+            helpText: qsTr("Prompt used to automatically generate chat names.")
+            Layout.row: 11
+            Layout.column: 0
+            Layout.topMargin: 15
+        }
+
         Rectangle {
-            id: optionalImageRect
-            visible: false // FIXME: for later
-            Layout.row: 2
-            Layout.column: 1
-            Layout.rowSpan: 5
-            Layout.alignment: Qt.AlignHCenter
-            Layout.fillHeight: true
-            Layout.maximumWidth: height
-            Layout.topMargin: 35
-            Layout.bottomMargin: 35
-            Layout.leftMargin: 35
-            width: 3000
-            radius: 10
+            id: chatNamePrompt
+            Layout.row: 12
+            Layout.column: 0
+            Layout.columnSpan: 2
+            Layout.fillWidth: true
+            Layout.minimumHeight: Math.max(100, chatNamePromptTextArea.contentHeight + 20)
             color: "transparent"
-            Item {
-                anchors.centerIn: parent
-                height: childrenRect.height
-                Image {
-                    id: img
-                    anchors.horizontalCenter: parent.horizontalCenter
-                    width: 100
-                    height: 100
-                    source: "qrc:/gpt4all/icons/image.svg"
-                }
-                Text {
-                    text: qsTr("Add\noptional image")
-                    font.pixelSize: theme.fontSizeLarge
-                    anchors.top: img.bottom
-                    anchors.horizontalCenter: parent.horizontalCenter
-                    wrapMode: TextArea.Wrap
-                    horizontalAlignment: Qt.AlignHCenter
-                    color: theme.mutedTextColor
-                }
-            }
+            clip: true
+            MyTextArea {
+                id: chatNamePromptTextArea
+                anchors.fill: parent
+                text: root.currentModelInfo.chatNamePrompt
+                Accessible.role: Accessible.EditableText
+                Accessible.name: chatNamePromptLabel.text
+                Accessible.description: chatNamePromptLabel.text
+            }
+        }
+
+        MySettingsLabel {
+            id: suggestedFollowUpPromptLabel
+            text: qsTr("Suggested FollowUp Prompt")
+            helpText: qsTr("Prompt used to generate suggested follow-up questions.")
+            Layout.row: 13
+            Layout.column: 0
+            Layout.topMargin: 15
+        }
+
+        Rectangle {
+            id: suggestedFollowUpPrompt
+            Layout.row: 14
+            Layout.column: 0
+            Layout.columnSpan: 2
+            Layout.fillWidth: true
+            Layout.minimumHeight: Math.max(100, suggestedFollowUpPromptTextArea.contentHeight + 20)
+            color: "transparent"
+            clip: true
+            MyTextArea {
+                id: suggestedFollowUpPromptTextArea
+                anchors.fill: parent
+                text: root.currentModelInfo.suggestedFollowUpPrompt
+                Accessible.role: Accessible.EditableText
+                Accessible.name: suggestedFollowUpPromptLabel.text
+                Accessible.description: suggestedFollowUpPromptLabel.text
             }
         }

         GridLayout {
-            Layout.row: 11
+            Layout.row: 15
             Layout.column: 0
             Layout.columnSpan: 2
             Layout.topMargin: 15
@@ -784,7 +803,7 @@
         }

         Rectangle {
-            Layout.row: 12
+            Layout.row: 16
             Layout.column: 0
             Layout.columnSpan: 2
             Layout.topMargin: 15