Mirror of https://github.com/oobabooga/text-generation-webui.git, last synced 2024-10-01 01:26:03 -04:00.
Move stuff into tabs in chat mode
This commit (ccf10db60f, parent a55e8836f6) is contained in:
20
server.py
20
server.py
@ -427,6 +427,7 @@ def get_available_softprompts():
|
||||
def create_extensions_block():
|
||||
extensions_ui_elements = []
|
||||
default_values = []
|
||||
if not args.chat or args.cai_chat:
|
||||
gr.Markdown('## Extensions parameters')
|
||||
for ext in sorted(extension_state, key=lambda x : extension_state[x][1]):
|
||||
if extension_state[ext][0] == True:
|
||||
@ -893,14 +894,7 @@ if args.chat or args.cai_chat:
|
||||
with gr.Row():
|
||||
picture_select = gr.Image(label="Send a picture", type='pil')
|
||||
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
max_new_tokens = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
|
||||
with gr.Column():
|
||||
chat_prompt_size_slider = gr.Slider(minimum=settings['chat_prompt_size_min'], maximum=settings['chat_prompt_size_max'], step=1, label='Maximum prompt size in tokens', value=settings['chat_prompt_size'])
|
||||
|
||||
preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping = create_settings_menus()
|
||||
|
||||
with gr.Tab("Chat settings"):
|
||||
name1 = gr.Textbox(value=settings[f'name1{suffix}'], lines=1, label='Your name')
|
||||
name2 = gr.Textbox(value=settings[f'name2{suffix}'], lines=1, label='Bot\'s name')
|
||||
context = gr.Textbox(value=settings[f'context{suffix}'], lines=2, label='Context')
|
||||
@ -934,7 +928,17 @@ if args.chat or args.cai_chat:
|
||||
with gr.Tab('Upload TavernAI Character Card'):
|
||||
upload_img_tavern = gr.File(type='binary')
|
||||
|
||||
with gr.Tab("Generation settings"):
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
max_new_tokens = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
|
||||
with gr.Column():
|
||||
chat_prompt_size_slider = gr.Slider(minimum=settings['chat_prompt_size_min'], maximum=settings['chat_prompt_size_max'], step=1, label='Maximum prompt size in tokens', value=settings['chat_prompt_size'])
|
||||
|
||||
preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping = create_settings_menus()
|
||||
|
||||
if args.extensions is not None:
|
||||
with gr.Tab("Extensions"):
|
||||
create_extensions_block()
|
||||
|
||||
input_params = [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size_slider]
|
||||
|
Loading…
Reference in New Issue
Block a user