Mirror of https://github.com/oobabooga/text-generation-webui.git
Add Eta Sampling preset
Also remove some presets that I do not consider relevant
Commit f27135bdd3 (parent 00ebea0b2a)
@@ -321,7 +321,7 @@ Out of memory errors? [Check the low VRAM guide](docs/Low-VRAM-guide.md).
 
 Inference settings presets can be created under `presets/` as text files. These files are detected automatically at startup.
 
-By default, 10 presets by NovelAI and KoboldAI are included. These were selected out of a sample of 43 presets after applying a K-Means clustering algorithm and selecting the elements closest to the average of each cluster.
+By default, 10 presets based on NovelAI and KoboldAI presets are included. These were selected out of a sample of 43 presets after applying a K-Means clustering algorithm and selecting the elements closest to the average of each cluster.
 
 [Visualization](https://user-images.githubusercontent.com/112222186/228956352-1addbdb9-2456-465a-b51d-089f462cd385.png)
 
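As an aside, the selection procedure described in that README paragraph can be sketched roughly as follows. This is a hypothetical reconstruction, not the script actually used for the repository's presets: it assumes each of the 43 candidate presets has already been flattened into a numeric parameter vector, and it uses scikit-learn's KMeans to keep the preset closest to each cluster centroid.

```python
# Hypothetical sketch of "K-Means clustering, then keep the element closest
# to the average of each cluster" -- not the script actually used upstream.
import numpy as np
from sklearn.cluster import KMeans

def select_representative_presets(names, vectors, n_clusters=10, seed=0):
    """names: list of preset names; vectors: (n_presets, n_params) array."""
    X = np.asarray(vectors, dtype=float)
    km = KMeans(n_clusters=n_clusters, random_state=seed, n_init=10).fit(X)
    chosen = []
    for c in range(n_clusters):
        members = np.where(km.labels_ == c)[0]
        # distance of each cluster member to its cluster centroid
        dists = np.linalg.norm(X[members] - km.cluster_centers_[c], axis=1)
        chosen.append(names[members[np.argmin(dists)]])
    return chosen
```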
@@ -345,6 +345,5 @@ Before reporting a bug, make sure that you have:
 ## Credits
 
 - Gradio dropdown menu refresh button, code for reloading the interface: https://github.com/AUTOMATIC1111/stable-diffusion-webui
-- Verbose preset: Anonymous 4chan user.
 - NovelAI and KoboldAI presets: https://github.com/KoboldAI/KoboldAI-Client/wiki/Settings-Presets
 - Code for early stopping in chat mode, code for some of the sliders: https://github.com/PygmalionAI/gradio-ui/
@@ -1,6 +0,0 @@
-do_sample: true
-top_p: 0.5
-top_k: 40
-temperature: 0.7
-repetition_penalty: 1.2
-typical_p: 1.0
@@ -1,3 +0,0 @@
-temperature: 0.7
-top_p: 0.8
-repetition_penalty: 1.02
presets/Special-Eta Sampling.yaml (new file)
@@ -0,0 +1,2 @@
+do_sample: true
+eta_cutoff: 3
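For context, here is a minimal sketch of what the new preset amounts to when translated into Hugging Face `transformers` generation arguments. It assumes, per the slider help text further down in this diff, that the stored `eta_cutoff` value is in units of 1e-4 (so 3 becomes 3e-4); the model name is only a placeholder, and this is not the web UI's own generation code.

```python
# Rough sketch only: applying "Special-Eta Sampling" outside the web UI.
import yaml
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "facebook/opt-1.3b"  # placeholder model, not part of the preset
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

with open("presets/Special-Eta Sampling.yaml") as f:
    preset = yaml.safe_load(f)  # {'do_sample': True, 'eta_cutoff': 3}

inputs = tokenizer("Common sense questions and answers\n\nQuestion:", return_tensors="pt")
output = model.generate(
    **inputs,
    do_sample=preset["do_sample"],
    eta_cutoff=preset["eta_cutoff"] * 1e-4,  # assumed scaling: 3 -> 0.0003
    max_new_tokens=200,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```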
@@ -1,9 +0,0 @@
-num_beams: 10
-min_length: 200
-length_penalty: 1.4
-no_repeat_ngram_size: 2
-early_stopping: true
-temperature: 0.7
-top_k: 150
-top_p: 0.92
-repetition_penalty: 4.5
@@ -91,7 +91,7 @@ def load_preset_values(preset_menu, state, return_dict=False):
         'eta_cutoff': 0,
         'repetition_penalty': 1,
         'encoder_repetition_penalty': 1,
-        'top_k': 50,
+        'top_k': 0,
         'num_beams': 1,
         'penalty_alpha': 0,
         'min_length': 0,
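The dictionary being edited above holds the fall-back values that a preset file overrides. A rough sketch of that merge, assuming presets are plain YAML mappings like the files in this commit; the helper below is illustrative only and is not the repository's `load_preset_values`.

```python
# Illustrative only: how a YAML preset could override the defaults shown above.
import yaml

DEFAULTS = {  # subset of the defaults visible in this hunk
    'eta_cutoff': 0,
    'repetition_penalty': 1,
    'encoder_repetition_penalty': 1,
    'top_k': 0,
    'num_beams': 1,
    'penalty_alpha': 0,
    'min_length': 0,
}

def load_preset(path):
    with open(path) as f:
        preset = yaml.safe_load(f) or {}
    params = dict(DEFAULTS)
    params.update(preset)  # keys present in the preset win over the defaults
    return params

# load_preset("presets/Special-Eta Sampling.yaml")
# -> eta_cutoff becomes 3 (plus do_sample: True); everything else keeps its default
```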
@@ -470,8 +470,8 @@ def create_settings_menus(default_preset):
                 shared.gradio['top_p'] = gr.Slider(0.0, 1.0, value=generate_params['top_p'], step=0.01, label='top_p', info='If not set to 1, select tokens with probabilities adding up to less than this number. Higher value = higher range of possible random results.')
                 shared.gradio['top_k'] = gr.Slider(0, 200, value=generate_params['top_k'], step=1, label='top_k', info='Similar to top_p, but select instead only the top_k most likely tokens. Higher value = higher range of possible random results.')
                 shared.gradio['typical_p'] = gr.Slider(0.0, 1.0, value=generate_params['typical_p'], step=0.01, label='typical_p', info='If not set to 1, select only tokens that are at least this much more likely to appear than random tokens, given the prior text.')
-                shared.gradio['epsilon_cutoff'] = gr.Slider(0, 9, value=generate_params['epsilon_cutoff'], step=0.01, label='epsilon_cutoff', info='In units of 1e-4')
-                shared.gradio['eta_cutoff'] = gr.Slider(0, 20, value=generate_params['eta_cutoff'], step=0.01, label='eta_cutoff', info='In units of 1e-4')
+                shared.gradio['epsilon_cutoff'] = gr.Slider(0, 9, value=generate_params['epsilon_cutoff'], step=0.01, label='epsilon_cutoff', info='In units of 1e-4; a reasonable value is 3. This sets a probability floor below which tokens are excluded from being sampled. Should be used with top_p, top_k, and eta_cutoff set to 0.')
+                shared.gradio['eta_cutoff'] = gr.Slider(0, 20, value=generate_params['eta_cutoff'], step=0.01, label='eta_cutoff', info='In units of 1e-4; a reasonable value is 3. Should be used with top_p, top_k, and epsilon_cutoff set to 0.')
 
             with gr.Column():
                 shared.gradio['repetition_penalty'] = gr.Slider(1.0, 1.5, value=generate_params['repetition_penalty'], step=0.01, label='repetition_penalty', info='Exponential penalty factor for repeating prior tokens. 1 means no penalty, higher value = less repetition, lower value = more repetition.')
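The rewritten help texts describe the two truncation-sampling rules these sliders control. The sketch below is an illustrative reimplementation for a single next-token distribution, using the same 1e-4 units as the sliders; it is not the code that transformers or the web UI actually run. The eta rule follows the published formulation in which the cutoff is min(epsilon, sqrt(epsilon) * exp(-entropy)).

```python
# Illustrative reimplementation of epsilon and eta truncation for one
# next-token logits vector; values are given in units of 1e-4 like the sliders.
import torch

def epsilon_cutoff_filter(logits: torch.Tensor, epsilon_1e4: float = 3.0) -> torch.Tensor:
    """Epsilon sampling: drop every token whose probability is below a fixed floor."""
    eps = epsilon_1e4 * 1e-4
    probs = torch.softmax(logits, dim=-1)
    mask = probs < eps
    mask[probs.argmax()] = False  # never drop the single most likely token
    return logits.masked_fill(mask, float("-inf"))

def eta_cutoff_filter(logits: torch.Tensor, eta_1e4: float = 3.0) -> torch.Tensor:
    """Eta sampling: like epsilon sampling, but the floor shrinks for
    high-entropy (flat) distributions, so they are truncated less aggressively."""
    eps = eta_1e4 * 1e-4
    probs = torch.softmax(logits, dim=-1)
    entropy = -(probs * torch.log(probs.clamp_min(1e-20))).sum()
    floor = min(eps, (eps ** 0.5) * torch.exp(-entropy).item())
    mask = probs < floor
    mask[probs.argmax()] = False  # never drop the single most likely token
    return logits.masked_fill(mask, float("-inf"))

# Sampling then proceeds from the filtered distribution, e.g.:
# next_token = torch.multinomial(torch.softmax(eta_cutoff_filter(logits), dim=-1), 1)
```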