Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-10-01 01:26:03 -04:00
185587a33e
If too many messages are used in the prompt, the model gets really slow. It is useful to have the ability to limit this.
21 lines
725 B
JSON
{
    "max_new_tokens": 200,
    "max_new_tokens_min": 1,
    "max_new_tokens_max": 2000,
    "preset": "NovelAI-Sphinx Moth",
    "name1": "Person 1",
    "name2": "Person 2",
    "context": "This is a conversation between two people.",
    "prompt": "Common sense questions and answers\n\nQuestion: \nFactual answer:",
    "prompt_gpt4chan": "-----\n--- 865467536\nInput text\n--- 865467537\n",
    "stop_at_newline": true,
    "history_size": 8,
    "history_size_min": 0,
    "history_size_max": 64,
    "preset_pygmalion": "Pygmalion",
    "name1_pygmalion": "You",
    "name2_pygmalion": "Kawaii",
    "context_pygmalion": "This is a conversation between two people.\n<START>",
    "stop_at_newline_pygmalion": false
}
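
The "history_size" fields above are what the commit message refers to: a cap on how many past exchanges get folded into the prompt, so long conversations do not slow generation down. Below is a minimal sketch of how such a cap might be applied when assembling the prompt; it is not the webui's actual code, and build_prompt plus the (speaker, text) history format are assumptions made for illustration. It assumes the JSON above is saved as settings.json.

import json

def build_prompt(history, settings):
    """Assemble a chat prompt from only the most recent exchanges.

    `history` is assumed to be a list of (speaker, text) tuples;
    a history_size of 0 is treated here as "no limit".
    """
    limit = settings.get("history_size", 0)
    recent = history[-limit:] if limit > 0 else history

    lines = [settings["context"], ""]
    for speaker, text in recent:
        lines.append(f"{speaker}: {text}")
    lines.append(f"{settings['name2']}:")  # cue the model to reply as name2
    return "\n".join(lines)

if __name__ == "__main__":
    # Load the settings file shown above and build a prompt from a long
    # dummy conversation; only the last `history_size` messages are kept.
    with open("settings.json") as f:
        settings = json.load(f)
    history = [(settings["name1"], f"message {i}") for i in range(50)]
    print(build_prompt(history, settings))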