Continue to shrink the API space for qml and the backend.
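
Fold the three calls main.qml previously had to make per turn (ChatModel::appendPrompt, ChatModel::appendResponse, and Chat::resetResponse) into a single Chat::newPromptResponsePair, and drop the Q_INVOKABLE exposure of the methods QML no longer calls directly.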

Adam Treat 2023-05-01 12:30:54 -04:00
parent ed59190e48
commit c0d4a9d426
4 changed files with 11 additions and 11 deletions

chat.cpp

@@ -55,11 +55,6 @@ void Chat::regenerateResponse()
     emit regenerateResponseRequested(); // blocking queued connection
 }
 
-void Chat::resetResponse()
-{
-    emit resetResponseRequested(); // blocking queued connection
-}
-
 void Chat::stopGenerating()
 {
     m_llmodel->stopGenerating();
@@ -109,6 +104,13 @@ int32_t Chat::threadCount() {
     return m_llmodel->threadCount();
 }
 
+void Chat::newPromptResponsePair(const QString &prompt)
+{
+    m_chatModel->appendPrompt(tr("Prompt: "), prompt);
+    m_chatModel->appendResponse(tr("Response: "), prompt);
+    emit resetResponseRequested(); // blocking queued connection
+}
+
 bool Chat::isRecalc() const
 {
     return m_llmodel->isRecalc();
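
Taken together, these two hunks replace a three-call sequence that QML had to perform in the right order (append the prompt row, append the empty response row, reset the response) with one backend entry point. Below is a minimal sketch of the resulting flow using plain C++ stand-ins rather than the project's real Qt classes; the signal emission and tr() are simplified away, and the stand-in types only borrow the names from the diff above.

#include <string>
#include <vector>

// Simplified stand-in for the ChatItem in chatmodel.h.
struct ChatItem { std::string name; std::string value; std::string prompt; };

class ChatModel {
public:
    // After this commit these are plain C++ methods (no longer Q_INVOKABLE).
    void appendPrompt(const std::string &name, const std::string &value) {
        m_chatItems.push_back({name, value, ""});
    }
    void appendResponse(const std::string &name, const std::string &prompt) {
        // The response row starts empty and records the prompt that produced it.
        m_chatItems.push_back({name, "", prompt});
    }
private:
    std::vector<ChatItem> m_chatItems;
};

class Chat {
public:
    // The single call QML now makes to start a turn; mirrors chat.cpp above.
    void newPromptResponsePair(const std::string &prompt) {
        m_chatModel.appendPrompt("Prompt: ", prompt);
        m_chatModel.appendResponse("Response: ", prompt);
        resetResponse(); // was a separate Q_INVOKABLE call before this commit
    }
private:
    void resetResponse() { /* stands in for emit resetResponseRequested() */ }
    ChatModel m_chatModel;
};

int main() {
    Chat chat;
    chat.newPromptResponsePair("Why is the sky blue?");
}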

chat.h

@@ -34,11 +34,11 @@ public:
     Q_INVOKABLE void prompt(const QString &prompt, const QString &prompt_template, int32_t n_predict, int32_t top_k, float top_p,
                             float temp, int32_t n_batch, float repeat_penalty, int32_t repeat_penalty_tokens);
     Q_INVOKABLE void regenerateResponse();
-    Q_INVOKABLE void resetResponse();
     Q_INVOKABLE void stopGenerating();
     Q_INVOKABLE void syncThreadCount();
     Q_INVOKABLE void setThreadCount(int32_t n_threads);
     Q_INVOKABLE int32_t threadCount();
+    Q_INVOKABLE void newPromptResponsePair(const QString &prompt);
 
     QString response() const;
     bool responseInProgress() const { return m_responseInProgress; }

chatmodel.h

@@ -101,7 +101,7 @@ public:
         return roles;
     }
 
-    Q_INVOKABLE void appendPrompt(const QString &name, const QString &value)
+    void appendPrompt(const QString &name, const QString &value)
     {
         ChatItem item;
         item.name = name;
@@ -112,7 +112,7 @@ public:
         emit countChanged();
     }
 
-    Q_INVOKABLE void appendResponse(const QString &name, const QString &prompt)
+    void appendResponse(const QString &name, const QString &prompt)
     {
         ChatItem item;
         item.id = m_chatItems.count(); // This is only relevant for responses

main.qml

@@ -949,9 +949,7 @@ Window {
                         chatModel.updateCurrentResponse(index, false);
                         chatModel.updateValue(index, LLM.currentChat.response);
                     }
-                    chatModel.appendPrompt(qsTr("Prompt: "), textInput.text);
-                    chatModel.appendResponse(qsTr("Response: "), textInput.text);
-                    LLM.currentChat.resetResponse()
+                    LLM.currentChat.newPromptResponsePair(textInput.text);
                     LLM.currentChat.prompt(textInput.text, settingsDialog.promptTemplate,
                                            settingsDialog.maxLength,
                                            settingsDialog.topK,
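
Net effect at the QML call site: what used to be three ordered calls (two model mutations plus a reset that is emitted as a blocking queued connection) is now a single invokable, so the ordering is enforced once in C++ rather than repeated correctly by every QML caller.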