Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2024-10-01 01:06:10 -04:00
Remove these checks, as they are mitigated by the repeat penalty and models really should train this out.
This commit is contained in:
parent ef2e1bd4fe
commit 9f323759ce

llm.cpp (5 deletions)
@@ -297,11 +297,6 @@ bool LLMObject::handleResponse(int32_t token, const std::string &response)
     Q_ASSERT(!response.empty());
     m_response.append(response);
     emit responseChanged();
-
-    // Stop generation if we encounter prompt or response tokens
-    QString r = QString::fromStdString(m_response);
-    if (r.contains("### Prompt:") || r.contains("### Response:"))
-        return false;
     return !m_stopGenerating;
 }

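The removed check stopped generation whenever the accumulated response contained the instruction-template markers "### Prompt:" or "### Response:". The commit message argues that the sampler's repeat penalty already discourages the model from re-emitting token sequences it has just seen. Below is a minimal, generic sketch of how such a repeat penalty typically works; it is not gpt4all's sampler, and the function name penalizeRepeats, the penalty value, and the toy logits are illustrative assumptions.

// Minimal sketch of a repeat penalty (illustrative only, not gpt4all's code).
// Tokens that already appear in the recent context get their logits reduced,
// so the model is discouraged from re-emitting strings it has just produced.
#include <cstdint>
#include <iostream>
#include <unordered_set>
#include <vector>

void penalizeRepeats(std::vector<float> &logits,
                     const std::vector<int32_t> &recentTokens,
                     float penalty /* > 1.0 penalizes repeats, e.g. 1.3f */)
{
    std::unordered_set<int32_t> seen(recentTokens.begin(), recentTokens.end());
    for (int32_t tok : seen) {
        if (tok < 0 || tok >= static_cast<int32_t>(logits.size()))
            continue;
        float &l = logits[tok];
        // Common convention: divide positive logits and multiply negative ones,
        // so the penalized token always becomes less likely.
        l = l > 0.0f ? l / penalty : l * penalty;
    }
}

int main()
{
    // Toy vocabulary of 5 tokens; token 2 was just generated repeatedly.
    std::vector<float> logits = {0.5f, 1.2f, 2.0f, -0.3f, 0.1f};
    std::vector<int32_t> recent = {2, 2, 2};

    penalizeRepeats(logits, recent, 1.3f);

    for (size_t i = 0; i < logits.size(); ++i)
        std::cout << "token " << i << ": " << logits[i] << "\n";
    // Token 2's logit drops from 2.0 to roughly 1.54, making a repeat less likely.
    return 0;
}

Under this scheme, a run of template-marker tokens is progressively suppressed as it repeats, which is the sense in which an explicit stop-on-marker check becomes redundant.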