Remove these stop-token checks, as they are mitigated by the repeat penalty, and models really should train this out.

This commit is contained in:
Adam Treat 2023-04-30 08:02:20 -04:00
parent ef2e1bd4fe
commit 9f323759ce

View File

@ -297,11 +297,6 @@ bool LLMObject::handleResponse(int32_t token, const std::string &response)
Q_ASSERT(!response.empty());
m_response.append(response);
emit responseChanged();
// Stop generation if we encounter prompt or response tokens
QString r = QString::fromStdString(m_response);
if (r.contains("### Prompt:") || r.contains("### Response:"))
return false;
return !m_stopGenerating;
}