Sort dropdowns numerically

mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-10-01 01:26:03 -04:00)
commit 56f6b7052a (parent ee3c8a893e)
modules/training.py
@@ -14,9 +14,9 @@ from datasets import Dataset, load_dataset
 from peft import (LoraConfig, get_peft_model, prepare_model_for_int8_training,
                   set_peft_model_state_dict)
 
-from modules import shared, ui
+from modules import shared, ui, utils
 from modules.evaluate import calculate_perplexity, generate_markdown_table, save_past_evaluations
-from server import get_available_loras, get_available_models
 
 # This mapping is from a very recent commit, not yet released.
 # If not available, default to a backup map for some common model types.
@@ -41,10 +41,6 @@ WANT_INTERRUPT = False
 PARAMETERS = ["lora_name", "always_override", "save_steps", "micro_batch_size", "batch_size", "epochs", "learning_rate", "lr_scheduler_type", "lora_rank", "lora_alpha", "lora_dropout", "cutoff_len", "dataset", "eval_dataset", "format", "eval_steps", "raw_text_file", "overlap_len", "newline_favor_len", "higher_rank_limit", "warmup_steps", "optimizer"]
 
 
-def get_datasets(path: str, ext: str):
-    return ['None'] + sorted(set([k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here']), key=str.lower)
-
-
 def create_train_interface():
     with gr.Tab('Train LoRA', elem_id='lora-train-tab'):
         gr.Markdown("Confused? [[Click here for a guide]](https://github.com/oobabooga/text-generation-webui/blob/main/docs/Training-LoRAs.md)")
@@ -55,8 +51,8 @@ def create_train_interface():
             save_steps = gr.Number(label='Save every n steps', value=0, info='If above 0, a checkpoint of the LoRA will be saved every time this many steps pass.')
 
         with gr.Row():
-            copy_from = gr.Dropdown(label='Copy parameters from', value='None', choices=get_available_loras())
-            ui.create_refresh_button(copy_from, lambda: None, lambda: {'choices': get_available_loras()}, 'refresh-button')
+            copy_from = gr.Dropdown(label='Copy parameters from', value='None', choices=utils.get_available_loras())
+            ui.create_refresh_button(copy_from, lambda: None, lambda: {'choices': utils.get_available_loras()}, 'refresh-button')
 
         with gr.Row():
             # TODO: Implement multi-device support.
@@ -76,19 +72,19 @@ def create_train_interface():
 
         with gr.Tab(label='Formatted Dataset'):
             with gr.Row():
-                dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.')
-                ui.create_refresh_button(dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
-                eval_dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.')
-                ui.create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
-                format = gr.Dropdown(choices=get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.')
-                ui.create_refresh_button(format, lambda: None, lambda: {'choices': get_datasets('training/formats', 'json')}, 'refresh-button')
+                dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.')
+                ui.create_refresh_button(dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button')
+                eval_dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.')
+                ui.create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button')
+                format = gr.Dropdown(choices=utils.get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.')
+                ui.create_refresh_button(format, lambda: None, lambda: {'choices': utils.get_datasets('training/formats', 'json')}, 'refresh-button')
 
             eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')
 
         with gr.Tab(label="Raw text file"):
             with gr.Row():
-                raw_text_file = gr.Dropdown(choices=get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The raw text file to use for training.')
-                ui.create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'txt')}, 'refresh-button')
+                raw_text_file = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The raw text file to use for training.')
+                ui.create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'txt')}, 'refresh-button')
 
             with gr.Row():
                 overlap_len = gr.Slider(label='Overlap Length', minimum=0, maximum=512, value=128, step=16, info='Overlap length - ie how many tokens from the prior chunk of text to include into the next chunk. (The chunks themselves will be of a size determined by Cutoff Length below). Setting overlap to exactly half the cutoff length may be ideal.')
@@ -111,8 +107,8 @@ def create_train_interface():
     with gr.Tab('Perplexity evaluation', elem_id='evaluate-tab'):
         with gr.Row():
             with gr.Column():
-                models = gr.Dropdown(get_available_models(), label='Models', multiselect=True)
-                evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.')
+                models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True)
+                evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + utils.get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.')
                 with gr.Row():
                     stride_length = gr.Slider(label='Stride', minimum=1, maximum=2048, value=512, step=1, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
                     max_length = gr.Slider(label='max_length', minimum=0, maximum=8096, value=0, step=1, info='The context for each evaluation. If set to 0, the maximum context length for the model will be used.')
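
Every dropdown touched above follows the same pattern: build the component with choices from a getter, then attach a refresh button whose callback re-runs that same getter. The helper itself lives in modules/ui.py and is not part of this diff; the following is only a minimal sketch of how such a helper can be wired in Gradio 3.x, so everything beyond the call signature seen in the diff is an assumption:

import gradio as gr


def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class):
    # Sketch only, not the project's exact code.
    # refresh_method is a pre-refresh hook (the callers above pass `lambda: None`);
    # refreshed_args is a callable returning the kwargs to update, e.g.
    # `lambda: {'choices': utils.get_available_loras()}`.
    def refresh():
        refresh_method()
        args = refreshed_args() if callable(refreshed_args) else refreshed_args
        return gr.update(**(args or {}))  # push the fresh choices into the component

    refresh_button = gr.Button('Refresh', elem_id=elem_class)
    refresh_button.click(fn=refresh, inputs=[], outputs=[refresh_component])
    return refresh_button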
modules/utils.py (new file)
@@ -0,0 +1,61 @@
+import os
+import re
+from pathlib import Path
+
+from modules import shared
+
+
+def atoi(text):
+    return int(text) if text.isdigit() else text.lower()
+
+
+def natural_keys(text):
+    return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_available_models():
+    if shared.args.flexgen:
+        return sorted([re.sub('-np$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if item.name.endswith('-np')], key=natural_keys)
+    else:
+        return sorted([re.sub('.pth$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json', '.yaml'))], key=natural_keys)
+
+
+def get_available_presets():
+    return sorted(set((k.stem for k in Path('presets').glob('*.txt'))), key=natural_keys)
+
+
+def get_available_prompts():
+    prompts = []
+    prompts += sorted(set((k.stem for k in Path('prompts').glob('[0-9]*.txt'))), key=natural_keys, reverse=True)
+    prompts += sorted(set((k.stem for k in Path('prompts').glob('*.txt'))), key=natural_keys)
+    prompts += ['None']
+    return prompts
+
+
+def get_available_characters():
+    paths = (x for x in Path('characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
+    return ['None'] + sorted(set((k.stem for k in paths if k.stem != "instruction-following")), key=natural_keys)
+
+
+def get_available_instruction_templates():
+    path = "characters/instruction-following"
+    paths = []
+    if os.path.exists(path):
+        paths = (x for x in Path(path).iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
+    return ['None'] + sorted(set((k.stem for k in paths)), key=natural_keys)
+
+
+def get_available_extensions():
+    return sorted(set(map(lambda x: x.parts[1], Path('extensions').glob('*/script.py'))), key=natural_keys)
+
+
+def get_available_softprompts():
+    return ['None'] + sorted(set((k.stem for k in Path('softprompts').glob('*.zip'))), key=natural_keys)
+
+
+def get_available_loras():
+    return sorted([item.name for item in list(Path(shared.args.lora_dir).glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=natural_keys)
+
+
+def get_datasets(path: str, ext: str):
+    return ['None'] + sorted(set([k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here']), key=natural_keys)
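
The two small functions at the top are the point of the commit: str.lower compares digit characters lexicographically, so 'llama-13b' sorts before 'llama-7b', while natural_keys splits each name into text and integer runs so the numeric parts compare as numbers. A standalone demonstration (the model names are made up for illustration):

import re


def atoi(text):
    return int(text) if text.isdigit() else text.lower()


def natural_keys(text):
    # re.split with a capturing group keeps the digit runs:
    # 'llama-13b' -> ['llama-', '13', 'b'] -> ['llama-', 13, 'b']
    return [atoi(c) for c in re.split(r'(\d+)', text)]


names = ['llama-13b', 'llama-30b', 'llama-7b']
print(sorted(names, key=str.lower))     # ['llama-13b', 'llama-30b', 'llama-7b']
print(sorted(names, key=natural_keys))  # ['llama-7b', 'llama-13b', 'llama-30b']

The same reasoning applies to get_available_prompts: with reverse=True, digit-named prompt files now come back in true numeric descending order rather than string order.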
server.py
@@ -43,56 +43,11 @@ import torch
 import yaml
 from PIL import Image
 import modules.extensions as extensions_module
-from modules import chat, shared, training, ui
+from modules import chat, shared, training, ui, utils
 from modules.html_generator import chat_html_wrapper
 from modules.LoRA import add_lora_to_model
 from modules.models import load_model, load_soft_prompt, unload_model
-from modules.text_generation import (encode, generate_reply,
-                                     stop_everything_event)
+from modules.text_generation import encode, generate_reply, stop_everything_event
 
 
-def get_available_models():
-    if shared.args.flexgen:
-        return sorted([re.sub('-np$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if item.name.endswith('-np')], key=str.lower)
-    else:
-        return sorted([re.sub('.pth$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json', '.yaml'))], key=str.lower)
-
-
-def get_available_presets():
-    return sorted(set((k.stem for k in Path('presets').glob('*.txt'))), key=str.lower)
-
-
-def get_available_prompts():
-    prompts = []
-    prompts += sorted(set((k.stem for k in Path('prompts').glob('[0-9]*.txt'))), key=str.lower, reverse=True)
-    prompts += sorted(set((k.stem for k in Path('prompts').glob('*.txt'))), key=str.lower)
-    prompts += ['None']
-    return prompts
-
-
-def get_available_characters():
-    paths = (x for x in Path('characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
-    return ['None'] + sorted(set((k.stem for k in paths if k.stem != "instruction-following")), key=str.lower)
-
-
-def get_available_instruction_templates():
-    path = "characters/instruction-following"
-    paths = []
-    if os.path.exists(path):
-        paths = (x for x in Path(path).iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
-    return ['None'] + sorted(set((k.stem for k in paths)), key=str.lower)
-
-
-def get_available_extensions():
-    return sorted(set(map(lambda x: x.parts[1], Path('extensions').glob('*/script.py'))), key=str.lower)
-
-
-def get_available_softprompts():
-    return ['None'] + sorted(set((k.stem for k in Path('softprompts').glob('*.zip'))), key=str.lower)
-
-
-def get_available_loras():
-    return sorted([item.name for item in list(Path(shared.args.lora_dir).glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=str.lower)
-
-
 def load_model_wrapper(selected_model):
@@ -324,13 +279,13 @@ def create_model_menus():
     with gr.Row():
         with gr.Column():
             with gr.Row():
-                shared.gradio['model_menu'] = gr.Dropdown(choices=get_available_models(), value=shared.model_name, label='Model')
-                ui.create_refresh_button(shared.gradio['model_menu'], lambda: None, lambda: {'choices': get_available_models()}, 'refresh-button')
+                shared.gradio['model_menu'] = gr.Dropdown(choices=utils.get_available_models(), value=shared.model_name, label='Model')
+                ui.create_refresh_button(shared.gradio['model_menu'], lambda: None, lambda: {'choices': utils.get_available_models()}, 'refresh-button')
 
         with gr.Column():
             with gr.Row():
-                shared.gradio['lora_menu'] = gr.Dropdown(multiselect=True, choices=get_available_loras(), value=shared.lora_names, label='LoRA(s)')
-                ui.create_refresh_button(shared.gradio['lora_menu'], lambda: None, lambda: {'choices': get_available_loras(), 'value': shared.lora_names}, 'refresh-button')
+                shared.gradio['lora_menu'] = gr.Dropdown(multiselect=True, choices=utils.get_available_loras(), value=shared.lora_names, label='LoRA(s)')
+                ui.create_refresh_button(shared.gradio['lora_menu'], lambda: None, lambda: {'choices': utils.get_available_loras(), 'value': shared.lora_names}, 'refresh-button')
 
         with gr.Column():
            with gr.Row():
@@ -411,8 +366,8 @@ def create_settings_menus(default_preset):
     with gr.Row():
         with gr.Column():
             with gr.Row():
-                shared.gradio['preset_menu'] = gr.Dropdown(choices=get_available_presets(), value=default_preset if not shared.args.flexgen else 'Naive', label='Generation parameters preset')
-                ui.create_refresh_button(shared.gradio['preset_menu'], lambda: None, lambda: {'choices': get_available_presets()}, 'refresh-button')
+                shared.gradio['preset_menu'] = gr.Dropdown(choices=utils.get_available_presets(), value=default_preset if not shared.args.flexgen else 'Naive', label='Generation parameters preset')
+                ui.create_refresh_button(shared.gradio['preset_menu'], lambda: None, lambda: {'choices': utils.get_available_presets()}, 'refresh-button')
         with gr.Column():
             shared.gradio['seed'] = gr.Number(value=shared.settings['seed'], label='Seed (-1 for random)')
 
@@ -459,8 +414,8 @@ def create_settings_menus(default_preset):
 
     with gr.Accordion('Soft prompt', open=False):
        with gr.Row():
-            shared.gradio['softprompts_menu'] = gr.Dropdown(choices=get_available_softprompts(), value='None', label='Soft prompt')
-            ui.create_refresh_button(shared.gradio['softprompts_menu'], lambda: None, lambda: {'choices': get_available_softprompts()}, 'refresh-button')
+            shared.gradio['softprompts_menu'] = gr.Dropdown(choices=utils.get_available_softprompts(), value='None', label='Soft prompt')
+            ui.create_refresh_button(shared.gradio['softprompts_menu'], lambda: None, lambda: {'choices': utils.get_available_softprompts()}, 'refresh-button')
 
        gr.Markdown('Upload a soft prompt (.zip format):')
        with gr.Row():
@@ -547,7 +502,7 @@ def create_interface():
                     shared.gradio['Clear history-cancel'] = gr.Button('Cancel', visible=False)
 
                 shared.gradio['mode'] = gr.Radio(choices=['cai-chat', 'chat', 'instruct'], value=shared.settings['mode'], label='Mode')
-                shared.gradio['instruction_template'] = gr.Dropdown(choices=get_available_instruction_templates(), label='Instruction template', value='None', visible=shared.settings['mode'] == 'instruct', info='Change this according to the model/LoRA that you are using.')
+                shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', visible=shared.settings['mode'] == 'instruct', info='Change this according to the model/LoRA that you are using.')
 
             with gr.Tab('Character', elem_id='chat-settings'):
                 with gr.Row():
@@ -563,8 +518,8 @@ def create_interface():
                         shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('cache/pfp_me.png')) if Path('cache/pfp_me.png').exists() else None)
 
                 with gr.Row():
-                    shared.gradio['character_menu'] = gr.Dropdown(choices=get_available_characters(), label='Character', elem_id='character-menu')
-                    ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': get_available_characters()}, 'refresh-button')
+                    shared.gradio['character_menu'] = gr.Dropdown(choices=utils.get_available_characters(), label='Character', elem_id='character-menu')
+                    ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': utils.get_available_characters()}, 'refresh-button')
 
                 with gr.Row():
                     with gr.Tab('Chat history'):
@@ -634,8 +589,8 @@ def create_interface():
                         gr.HTML('<div style="padding-bottom: 13px"></div>')
                         shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
                         with gr.Row():
-                            shared.gradio['prompt_menu'] = gr.Dropdown(choices=get_available_prompts(), value='None', label='Prompt')
-                            ui.create_refresh_button(shared.gradio['prompt_menu'], lambda: None, lambda: {'choices': get_available_prompts()}, 'refresh-button')
+                            shared.gradio['prompt_menu'] = gr.Dropdown(choices=utils.get_available_prompts(), value='None', label='Prompt')
+                            ui.create_refresh_button(shared.gradio['prompt_menu'], lambda: None, lambda: {'choices': utils.get_available_prompts()}, 'refresh-button')
 
                         shared.gradio['save_prompt'] = gr.Button('Save prompt')
                         shared.gradio['count_tokens'] = gr.Button('Count tokens')
@@ -664,8 +619,8 @@ def create_interface():
                with gr.Row():
                    with gr.Column():
                        with gr.Row():
-                            shared.gradio['prompt_menu'] = gr.Dropdown(choices=get_available_prompts(), value='None', label='Prompt')
-                            ui.create_refresh_button(shared.gradio['prompt_menu'], lambda: None, lambda: {'choices': get_available_prompts()}, 'refresh-button')
+                            shared.gradio['prompt_menu'] = gr.Dropdown(choices=utils.get_available_prompts(), value='None', label='Prompt')
+                            ui.create_refresh_button(shared.gradio['prompt_menu'], lambda: None, lambda: {'choices': utils.get_available_prompts()}, 'refresh-button')
 
                    with gr.Column():
                        shared.gradio['status'] = gr.Markdown('')
@@ -705,7 +660,7 @@ def create_interface():
 
            gr.Markdown("*Experimental*")
            shared.gradio['interface_modes_menu'] = gr.Dropdown(choices=modes, value=current_mode, label="Mode")
-            shared.gradio['extensions_menu'] = gr.CheckboxGroup(choices=get_available_extensions(), value=shared.args.extensions, label="Available extensions")
+            shared.gradio['extensions_menu'] = gr.CheckboxGroup(choices=utils.get_available_extensions(), value=shared.args.extensions, label="Available extensions")
            shared.gradio['bool_menu'] = gr.CheckboxGroup(choices=bool_list, value=bool_active, label="Boolean command-line flags")
            shared.gradio['reset_interface'] = gr.Button("Apply and restart the interface")
 
@@ -869,7 +824,7 @@ if __name__ == "__main__":
            shared.settings[item] = new_settings[item]
 
    # Default extensions
-    extensions_module.available_extensions = get_available_extensions()
+    extensions_module.available_extensions = utils.get_available_extensions()
    if shared.is_chat():
        for extension in shared.settings['chat_default_extensions']:
            shared.args.extensions = shared.args.extensions or []
@@ -881,7 +836,7 @@ if __name__ == "__main__":
            if extension not in shared.args.extensions:
                shared.args.extensions.append(extension)
 
-    available_models = get_available_models()
+    available_models = utils.get_available_models()
 
    # Model defined through --model
    if shared.args.model is not None:
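
Taken together, the post-commit pattern is: a component gets its initial choices from modules.utils at build time, and its paired refresh button re-queries the filesystem on click. A minimal self-contained Gradio 3.x sketch of the same wiring (the 'presets' folder and labels are placeholders, not the app's exact layout):

import re
from pathlib import Path

import gradio as gr


def atoi(text):
    return int(text) if text.isdigit() else text.lower()


def natural_keys(text):
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_available_presets():
    # Same idea as modules/utils.py: scan a folder, sort numerically.
    return sorted((p.stem for p in Path('presets').glob('*.txt')), key=natural_keys)


with gr.Blocks() as demo:
    menu = gr.Dropdown(choices=get_available_presets(), label='Preset')
    refresh = gr.Button('Refresh')
    # Re-scan the folder and push the fresh list into the dropdown:
    refresh.click(lambda: gr.update(choices=get_available_presets()), [], menu)

demo.launch()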