text-generation-webui/settings-template.json

{
    "max_new_tokens": 200,
    "max_new_tokens_min": 1,
    "max_new_tokens_max": 2000,
    "seed": -1,
    "name1": "You",
    "name2": "Assistant",
    "context": "This is a conversation with your Assistant. The Assistant is very helpful and is eager to chat with you and answer your questions.",
    "greeting": "Hello there!",
    "stop_at_newline": false,
    "chat_prompt_size": 2048,
    "chat_prompt_size_min": 0,
    "chat_prompt_size_max": 2048,
    "chat_generation_attempts": 1,
    "chat_generation_attempts_min": 1,
    "chat_generation_attempts_max": 5,
    "default_extensions": [],
    "chat_default_extensions": [
        "gallery"
    ],
    "presets": {
        "default": "Default",
        ".*pygmalion": "NovelAI-Storywriter",
        ".*RWKV": "Naive"
    },
    "prompts": {
        "default": "QA",
        ".*(gpt4chan|gpt-4chan|4chan)": "GPT-4chan",
        ".*oasst": "Open Assistant",
        ".*alpaca": "Alpaca"
    },
    "lora_prompts": {
        "default": "QA",
        ".*(alpaca-lora-7b|alpaca-lora-13b|alpaca-lora-30b)": "Alpaca"
    }
}
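
The "presets", "prompts", and "lora_prompts" maps are keyed by regular expressions that are matched against the model (or LoRA) name, with "default" as the fallback entry. Below is a minimal Python sketch of how such a map might be resolved; the resolve_setting helper and its matching details are illustrative assumptions, not the project's actual loader.

import re

def resolve_setting(mapping: dict, model_name: str) -> str:
    # Treat every key except "default" as a regex against the model name
    # (case-insensitive here by lowercasing both sides); fall back to "default".
    for pattern, value in mapping.items():
        if pattern == "default":
            continue
        if re.match(pattern.lower(), model_name.lower()):
            return value
    return mapping["default"]

presets = {
    "default": "Default",
    ".*pygmalion": "NovelAI-Storywriter",
    ".*RWKV": "Naive",
}

print(resolve_setting(presets, "pygmalion-6b"))  # NovelAI-Storywriter
print(resolve_setting(presets, "llama-13b"))     # Default

With this template, a model whose name contains "pygmalion" would pick up the NovelAI-Storywriter preset, while anything unmatched falls back to the Default preset.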