Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-10-01 01:26:03 -04:00)
Add epsilon_cutoff/eta_cutoff parameters (#2258)

parent 767a767989
commit 8ac3636966
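Background: epsilon_cutoff and eta_cutoff are truncation-sampling parameters (epsilon sampling and eta sampling, from Hewitt et al. 2022, "Truncation Sampling as Language Model Desmoothing") that Hugging Face transformers' generate() accepts as raw probabilities. The webui exposes them in units of 1e-4, scaling them down before generation, and a value of 0 leaves a cutoff disabled. A minimal sketch of the underlying transformers call, independent of this commit (model and values are placeholders):

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = AutoModelForCausalLM.from_pretrained('gpt2')

inputs = tokenizer('The quick brown fox', return_tensors='pt')
# generate() takes raw probabilities: a webui value of 3 means 3e-4 here
output = model.generate(**inputs, do_sample=True,
                        epsilon_cutoff=3e-4, eta_cutoff=3e-4,
                        max_new_tokens=20)
print(tokenizer.decode(output[0]))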
@@ -36,6 +36,8 @@ async def run(user_input, history):
         'temperature': 0.7,
         'top_p': 0.1,
         'typical_p': 1,
+        'epsilon_cutoff': 0, # In units of 1e-4
+        'eta_cutoff': 0, # In units of 1e-4
         'repetition_penalty': 1.18,
         'top_k': 40,
         'min_length': 0,
@@ -30,6 +30,8 @@ def run(user_input, history):
         'temperature': 0.7,
         'top_p': 0.1,
         'typical_p': 1,
+        'epsilon_cutoff': 0, # In units of 1e-4
+        'eta_cutoff': 0, # In units of 1e-4
         'repetition_penalty': 1.18,
         'top_k': 40,
         'min_length': 0,
@@ -24,6 +24,8 @@ async def run(context):
         'temperature': 1.3,
         'top_p': 0.1,
         'typical_p': 1,
+        'epsilon_cutoff': 0, # In units of 1e-4
+        'eta_cutoff': 0, # In units of 1e-4
         'repetition_penalty': 1.18,
         'top_k': 40,
         'min_length': 0,
@@ -16,6 +16,8 @@ def run(prompt):
         'temperature': 1.3,
         'top_p': 0.1,
         'typical_p': 1,
+        'epsilon_cutoff': 0, # In units of 1e-4
+        'eta_cutoff': 0, # In units of 1e-4
         'repetition_penalty': 1.18,
         'top_k': 40,
         'min_length': 0,
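All four API example scripts above gain the same two request keys. Trimmed to the fields relevant here, the request body they build looks roughly like this (values illustrative, not from the diff):

request = {
    'prompt': 'Tell me a joke.',  # the chat examples send user_input/history instead
    'do_sample': True,
    'temperature': 0.7,
    'typical_p': 1,
    'epsilon_cutoff': 3,  # In units of 1e-4, i.e. 3e-4; 0 disables the cutoff
    'eta_cutoff': 3,  # In units of 1e-4; 0 disables the cutoff
    'repetition_penalty': 1.18,
}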
@@ -15,6 +15,8 @@ def build_parameters(body, chat=False):
         'temperature': float(body.get('temperature', 0.5)),
         'top_p': float(body.get('top_p', 1)),
         'typical_p': float(body.get('typical_p', body.get('typical', 1))),
+        'epsilon_cutoff': float(body.get('epsilon_cutoff', 0)),
+        'eta_cutoff': float(body.get('eta_cutoff', 0)),
         'repetition_penalty': float(body.get('repetition_penalty', body.get('rep_pen', 1.1))),
         'encoder_repetition_penalty': float(body.get('encoder_repetition_penalty', 1.0)),
         'top_k': int(body.get('top_k', 0)),
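In build_parameters the new keys follow the existing pattern: float() coercion with a default of 0, so requests from older clients that omit them are unaffected. A standalone illustration with a hypothetical request body:

body = {'eta_cutoff': '3'}  # values may arrive as JSON strings
epsilon = float(body.get('epsilon_cutoff', 0))  # 0.0, cutoff stays disabled
eta = float(body.get('eta_cutoff', 0))  # 3.0, scaled to 3e-4 downstream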
@@ -208,6 +208,8 @@ class Handler(BaseHTTPRequestHandler):
         'add_bos_token': shared.settings.get('add_bos_token', True),
         'do_sample': True,
         'typical_p': 1.0,
+        'epsilon_cutoff': 0, # In units of 1e-4
+        'eta_cutoff': 0, # In units of 1e-4
         'min_length': 0,
         'no_repeat_ngram_size': 0,
         'num_beams': 1,
@@ -516,6 +518,8 @@ class Handler(BaseHTTPRequestHandler):
         'add_bos_token': shared.settings.get('add_bos_token', True),
         'do_sample': True,
         'typical_p': 1.0,
+        'epsilon_cutoff': 0, # In units of 1e-4
+        'eta_cutoff': 0, # In units of 1e-4
         'min_length': 0,
         'no_repeat_ngram_size': 0,
         'num_beams': 1,
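Both of the Handler class's default-parameter dictionaries gain the same two entries, so requests that omit the fields keep the previous behavior (cutoffs disabled at 0).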
@@ -190,6 +190,10 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None,
     for k in ['max_new_tokens', 'do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']:
         generate_params[k] = state[k]

+    for k in ['epsilon_cutoff', 'eta_cutoff']:
+        if state[k] > 0:
+            generate_params[k] = state[k] * 1e-4
+
     if state['ban_eos_token']:
         generate_params['suppress_tokens'] = [shared.tokenizer.eos_token_id]

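This is where the UI units become real probabilities, and the `if state[k] > 0` guard means a zero value never reaches generate() at all. A standalone worked example of the loop:

state = {'epsilon_cutoff': 3, 'eta_cutoff': 0}  # hypothetical UI values
generate_params = {}
for k in ['epsilon_cutoff', 'eta_cutoff']:
    if state[k] > 0:
        generate_params[k] = state[k] * 1e-4
# generate_params now holds epsilon_cutoff=3e-4; eta_cutoff stays unset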
@@ -37,7 +37,7 @@ def list_model_elements():


 def list_interface_input_elements(chat=False):
-    elements = ['max_new_tokens', 'seed', 'temperature', 'top_p', 'top_k', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'no_repeat_ngram_size', 'min_length', 'do_sample', 'penalty_alpha', 'num_beams', 'length_penalty', 'early_stopping', 'add_bos_token', 'ban_eos_token', 'truncation_length', 'custom_stopping_strings', 'skip_special_tokens', 'preset_menu', 'stream']
+    elements = ['max_new_tokens', 'seed', 'temperature', 'top_p', 'top_k', 'typical_p', 'epsilon_cutoff', 'eta_cutoff', 'repetition_penalty', 'encoder_repetition_penalty', 'no_repeat_ngram_size', 'min_length', 'do_sample', 'penalty_alpha', 'num_beams', 'length_penalty', 'early_stopping', 'add_bos_token', 'ban_eos_token', 'truncation_length', 'custom_stopping_strings', 'skip_special_tokens', 'preset_menu', 'stream']
     if chat:
         elements += ['name1', 'name2', 'greeting', 'context', 'chat_prompt_size', 'chat_generation_attempts', 'stop_at_newline', 'mode', 'instruction_template', 'character_menu', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'chat_style', 'chat-instruct_command']

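Registering the two names in list_interface_input_elements is presumably what lets the new sliders travel through the shared interface state with the rest of the generation inputs; without this, their values would not be gathered when generating.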
server.py
@@ -84,6 +84,8 @@ def load_preset_values(preset_menu, state, return_dict=False):
         'temperature': 1,
         'top_p': 1,
         'typical_p': 1,
+        'epsilon_cutoff': 0,
+        'eta_cutoff': 0,
         'repetition_penalty': 1,
         'encoder_repetition_penalty': 1,
         'top_k': 50,
@@ -100,13 +102,13 @@ def load_preset_values(preset_menu, state, return_dict=False):
         i = i.rstrip(',').strip().split('=')
         if len(i) == 2 and i[0].strip() != 'tokens':
             generate_params[i[0].strip()] = eval(i[1].strip())
-    generate_params['temperature'] = min(1.99, generate_params['temperature'])

+    generate_params['temperature'] = min(1.99, generate_params['temperature'])
     if return_dict:
         return generate_params
     else:
         state.update(generate_params)
-        return state, *[generate_params[k] for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']]
+        return state, *[generate_params[k] for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'epsilon_cutoff', 'eta_cutoff', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']]


 def upload_soft_prompt(file):
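Since presets are plain-text key=value lines and the loop above evals each right-hand side, a preset file can now set the cutoffs directly. A standalone trace with a hypothetical preset line:

generate_params = {'epsilon_cutoff': 0}  # default from the dict above
line = 'epsilon_cutoff=3'  # hypothetical line from a preset file
i = line.rstrip(',').strip().split('=')  # ['epsilon_cutoff', '3']
if len(i) == 2 and i[0].strip() != 'tokens':
    generate_params[i[0].strip()] = eval(i[1].strip())
# generate_params['epsilon_cutoff'] is now 3, still in units of 1e-4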
@@ -453,17 +455,24 @@ def create_settings_menus(default_preset):
                         shared.gradio['do_sample'] = gr.Checkbox(value=generate_params['do_sample'], label='do_sample')
         with gr.Column():
             with gr.Box():
-                gr.Markdown('Contrastive search')
-                shared.gradio['penalty_alpha'] = gr.Slider(0, 5, value=generate_params['penalty_alpha'], label='penalty_alpha')
-
-                gr.Markdown('Beam search (uses a lot of VRAM)')
                 with gr.Row():
                     with gr.Column():
+                        gr.Markdown('Contrastive search')
+                        shared.gradio['penalty_alpha'] = gr.Slider(0, 5, value=generate_params['penalty_alpha'], label='penalty_alpha')
+
+                        gr.Markdown('Beam search (uses a lot of VRAM)')
                         shared.gradio['num_beams'] = gr.Slider(1, 20, step=1, value=generate_params['num_beams'], label='num_beams')
                         shared.gradio['length_penalty'] = gr.Slider(-5, 5, value=generate_params['length_penalty'], label='length_penalty')
-                    with gr.Column():
                         shared.gradio['early_stopping'] = gr.Checkbox(value=generate_params['early_stopping'], label='early_stopping')

+                    with gr.Column():
+                        gr.Markdown('Other')
+                        with gr.Row():
+                            with gr.Column():
+                                shared.gradio['epsilon_cutoff'] = gr.Slider(0, 9, value=generate_params['epsilon_cutoff'], step=0.01, label='epsilon_cutoff', info='In units of 1e-4')
+                            with gr.Column():
+                                shared.gradio['eta_cutoff'] = gr.Slider(0, 20, value=generate_params['eta_cutoff'], step=0.01, label='eta_cutoff', info='In units of 1e-4')
+
     with gr.Box():
         with gr.Row():
             with gr.Column():
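Note the slider ranges: with the 1e-4 scaling applied at generation time, epsilon_cutoff's 0 to 9 span corresponds to probabilities up to 9e-4, and eta_cutoff's 0 to 20 span to probabilities up to 2e-3.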
@@ -485,7 +494,7 @@ def create_settings_menus(default_preset):
         with gr.Row():
             shared.gradio['upload_softprompt'] = gr.File(type='binary', file_types=['.zip'])

-    shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio[k] for k in ['preset_menu', 'interface_state']], [shared.gradio[k] for k in ['interface_state', 'do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']])
+    shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio[k] for k in ['preset_menu', 'interface_state']], [shared.gradio[k] for k in ['interface_state', 'do_sample', 'temperature', 'top_p', 'typical_p', 'epsilon_cutoff', 'eta_cutoff', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']])
     shared.gradio['softprompts_menu'].change(load_soft_prompt, shared.gradio['softprompts_menu'], shared.gradio['softprompts_menu'], show_progress=True)
     shared.gradio['upload_softprompt'].upload(upload_soft_prompt, shared.gradio['upload_softprompt'], shared.gradio['softprompts_menu'])
