Add prompt_lookup_num_tokens parameter (#5296)
parent 952a05a7c8
commit e055967974
@@ -37,6 +37,7 @@ class GenerationOptions(BaseModel):
     early_stopping: bool = False
     truncation_length: int = 0
     max_tokens_second: int = 0
+    prompt_lookup_num_tokens: int = 0
     custom_token_bans: str = ""
     auto_max_new_tokens: bool = False
     ban_eos_token: bool = False
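With the field added to GenerationOptions, the value can ride along with any other generation option in a request to the OpenAI-compatible API. A minimal sketch, assuming the web UI was started with the API enabled and reachable at the default address (the port, endpoint path, and prompt below are illustrative assumptions, not part of this commit):

import requests

# Hypothetical request body; prompt_lookup_num_tokens is sent alongside the
# other generation options defined in GenerationOptions.
payload = {
    "prompt": "def fibonacci(n):",
    "max_tokens": 200,
    "prompt_lookup_num_tokens": 3,  # 0 (the default) leaves prompt lookup decoding off
}

response = requests.post("http://127.0.0.1:5000/v1/completions", json=payload)
print(response.json()["choices"][0]["text"])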
@@ -192,6 +192,7 @@ def transformers_samplers():
         'add_bos_token',
         'skip_special_tokens',
         'auto_max_new_tokens',
+        'prompt_lookup_num_tokens'
     }
@@ -47,6 +47,7 @@ settings = {
     'truncation_length_max': 200000,
     'max_tokens_second': 0,
     'max_updates_second': 0,
+    'prompt_lookup_num_tokens': 0,
     'custom_stopping_strings': '',
     'custom_token_bans': '',
     'auto_max_new_tokens': False,
@@ -291,6 +291,9 @@ def generate_reply_HF(question, original_question, seed, state, stopping_strings
     if state['negative_prompt'] != '':
         generate_params['negative_prompt_ids'] = encode(state['negative_prompt'])
 
+    if state['prompt_lookup_num_tokens'] > 0:
+        generate_params['prompt_lookup_num_tokens'] = state['prompt_lookup_num_tokens']
+
     for k in ['epsilon_cutoff', 'eta_cutoff']:
         if state[k] > 0:
             generate_params[k] = state[k] * 1e-4
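generate_params is eventually handed to the Transformers generate() call, which accepts prompt_lookup_num_tokens in recent releases and uses it to enable prompt lookup decoding: candidate continuations are proposed by matching n-grams already present in the prompt and then verified by the model in a single forward pass, which can speed up generation for repetitive or input-grounded text. A standalone sketch of the same mechanism outside the web UI (the model name and prompt are placeholders):

from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder model; any causal LM supported by Transformers works the same way.
model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

inputs = tokenizer("The quick brown fox jumps over the lazy dog. The quick brown", return_tensors="pt")

# Passing prompt_lookup_num_tokens > 0 activates prompt lookup decoding
# (requires a Transformers version that ships this feature).
outputs = model.generate(**inputs, max_new_tokens=30, prompt_lookup_num_tokens=3)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))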
@@ -112,6 +112,7 @@ def list_interface_input_elements():
         'auto_max_new_tokens',
         'max_tokens_second',
         'max_updates_second',
+        'prompt_lookup_num_tokens',
         'seed',
         'temperature',
         'temperature_last',
@@ -72,6 +72,7 @@ def create_ui(default_preset):
     shared.gradio['truncation_length'] = gr.Slider(value=get_truncation_length(), minimum=shared.settings['truncation_length_min'], maximum=shared.settings['truncation_length_max'], step=256, label='Truncate the prompt up to this length', info='The leftmost tokens are removed if the prompt exceeds this length. Most models require this to be at most 2048.')
     shared.gradio['max_tokens_second'] = gr.Slider(value=shared.settings['max_tokens_second'], minimum=0, maximum=20, step=1, label='Maximum tokens/second', info='To make text readable in real time.')
     shared.gradio['max_updates_second'] = gr.Slider(value=shared.settings['max_updates_second'], minimum=0, maximum=24, step=1, label='Maximum UI updates/second', info='Set this if you experience lag in the UI during streaming.')
+    shared.gradio['prompt_lookup_num_tokens'] = gr.Slider(value=shared.settings['prompt_lookup_num_tokens'], minimum=0, maximum=10, step=1, label='prompt_lookup_num_tokens', info='Activates Prompt Lookup Decoding.')
 
     shared.gradio['custom_stopping_strings'] = gr.Textbox(lines=1, value=shared.settings["custom_stopping_strings"] or None, label='Custom stopping strings', info='In addition to the defaults. Written between "" and separated by commas.', placeholder='"\\n", "\\nYou:"')
     shared.gradio['custom_token_bans'] = gr.Textbox(value=shared.settings['custom_token_bans'] or None, label='Custom token bans', info='Specific token IDs to ban from generating, comma-separated. The IDs can be found in the Default or Notebook tab.')
@@ -16,6 +16,7 @@ truncation_length_min: 0
 truncation_length_max: 200000
 max_tokens_second: 0
 max_updates_second: 0
+prompt_lookup_num_tokens: 0
 custom_stopping_strings: ''
 custom_token_bans: ''
 auto_max_new_tokens: false