Merge branch 'main' into feature/llamacpp

oobabooga 2023-03-31 14:37:04 -03:00 committed by GitHub
commit 4d98623041
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 43 additions and 23 deletions

View File

@@ -240,4 +240,4 @@ Before reporting a bug, make sure that you have:
- Gradio dropdown menu refresh button, code for reloading the interface: https://github.com/AUTOMATIC1111/stable-diffusion-webui
- Verbose preset: Anonymous 4chan user.
- NovelAI and KoboldAI presets: https://github.com/KoboldAI/KoboldAI-Client/wiki/Settings-Presets
- Pygmalion preset, code for early stopping in chat mode, code for some of the sliders, --chat mode colors: https://github.com/PygmalionAI/gradio-ui/
- Code for early stopping in chat mode, code for some of the sliders: https://github.com/PygmalionAI/gradio-ui/

View File

@@ -2,19 +2,29 @@ from pathlib import Path
import gradio as gr
from modules.chat import load_character
from modules.html_generator import get_image_cache
from modules.shared import gradio, settings
def generate_html():
def generate_css():
css = """
.character-gallery {
.character-gallery > .gallery {
margin: 1rem 0;
display: grid;
display: grid !important;
grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
grid-column-gap: 0.4rem;
grid-row-gap: 1.2rem;
}
.character-gallery > .label {
display: none !important;
}
.character-gallery button.gallery-item {
display: contents;
}
.character-container {
cursor: pointer;
text-align: center;
@@ -45,14 +55,16 @@ def generate_html():
overflow-wrap: anywhere;
}
"""
return css
container_html = f'<style>{css}</style><div class="character-gallery">'
def generate_html():
cards = []
# Iterate through files in image folder
for file in sorted(Path("characters").glob("*")):
if file.name.endswith(".json"):
character = file.name.replace(".json", "")
container_html += f'<div class="character-container" onclick=\'document.getElementById("character-menu").children[1].children[1].value = "{character}"; document.getElementById("character-menu").children[1].children[1].dispatchEvent(new Event("change"));\'>'
container_html = f'<div class="character-container">'
image_html = "<div class='placeholder'></div>"
for i in [
@@ -71,12 +83,24 @@ def generate_html():
container_html += f'{image_html} <span class="character-name">{character}</span>'
container_html += "</div>"
cards.append([container_html, character])
return cards
def select_character(evt: gr.SelectData):
return (evt.value[1])
container_html += "</div>"
return container_html
def ui():
with gr.Accordion("Character gallery", open=False):
update = gr.Button("Refresh")
gallery = gr.HTML(value=generate_html())
gr.HTML(value="<style>"+generate_css()+"</style>")
gallery = gr.Dataset(components=[gr.HTML(visible=False)],
label="",
samples=generate_html(),
elem_classes=["character-gallery"],
samples_per_page=50
)
update.click(generate_html, [], gallery)
gallery.select(select_character, None, gradio['character_menu'])
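
The hunk above replaces the plain gr.HTML gallery with a gr.Dataset whose .select event writes the clicked character back into the character dropdown. A minimal sketch of that Gradio pattern, assuming Gradio 3.24-style APIs; the sample names and the target textbox below are illustrative, not part of this diff:

import gradio as gr

def build_samples():
    # Each sample is [html_card, character_name]; only the hidden HTML component is rendered.
    return [[f"<div class='character-container'>{name}</div>", name]
            for name in ["Example A", "Example B"]]

def on_select(evt: gr.SelectData):
    # evt.value holds the selected sample, i.e. [html_card, character_name]
    return evt.value[1]

with gr.Blocks() as demo:
    picked = gr.Textbox(label="Selected character")
    gallery = gr.Dataset(components=[gr.HTML(visible=False)],
                         samples=build_samples(),
                         samples_per_page=50)
    gallery.select(on_select, None, picked)

demo.launch()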

View File

View File

@@ -12,7 +12,7 @@ import modules.shared as shared
# Copied from https://github.com/PygmalionAI/gradio-ui/
class _SentinelTokenStoppingCriteria(transformers.StoppingCriteria):
def __init__(self, sentinel_token_ids: list[torch.LongTensor], starting_idx: int):
def __init__(self, sentinel_token_ids: list, starting_idx: int):
transformers.StoppingCriteria.__init__(self)
self.sentinel_token_ids = sentinel_token_ids
self.starting_idx = starting_idx
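
For context, the class above (borrowed from PygmalionAI/gradio-ui) stops generation once a sentinel token sequence appears in the newly generated tokens; only the relaxed __init__ type hint changes in this diff. A hedged sketch of how such a criteria is typically implemented and wired into transformers.generate(); the __call__ body here is illustrative, not taken from this diff:

import torch
import transformers

class SentinelTokenStoppingCriteria(transformers.StoppingCriteria):
    def __init__(self, sentinel_token_ids: list, starting_idx: int):
        transformers.StoppingCriteria.__init__(self)
        self.sentinel_token_ids = sentinel_token_ids  # list of LongTensors, e.g. the tokenized "\nYou:"
        self.starting_idx = starting_idx              # prompt length, so only new tokens are checked

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> bool:
        for sample in input_ids:
            trimmed = sample[self.starting_idx:]
            for sentinel in self.sentinel_token_ids:
                n = sentinel.shape[-1]
                # Stop as soon as the tail of the generated tokens matches a sentinel.
                if trimmed.shape[0] >= n and torch.equal(trimmed[-n:], sentinel.reshape(-1)):
                    return True
        return False

# Typical wiring (names assumed):
# stopping = transformers.StoppingCriteriaList([SentinelTokenStoppingCriteria(ids, prompt_len)])
# model.generate(input_ids, stopping_criteria=stopping, ...)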

View File

@@ -27,6 +27,7 @@ settings = {
'max_new_tokens': 200,
'max_new_tokens_min': 1,
'max_new_tokens_max': 2000,
'seed': -1,
'name1': 'You',
'name2': 'Assistant',
'context': 'This is a conversation with your Assistant. The Assistant is very helpful and is eager to chat with you and answer your questions.',
@@ -41,7 +42,7 @@ settings = {
'chat_default_extensions': ["gallery"],
'presets': {
'default': 'NovelAI-Sphinx Moth',
'.*pygmalion': 'Pygmalion',
'.*pygmalion': 'NovelAI-Storywriter',
'.*RWKV': 'Naive',
},
'prompts': {
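
The 'presets' keys above are regular expressions matched against the model name; this diff only retargets the '.*pygmalion' entry from the removed Pygmalion preset to NovelAI-Storywriter. A hedged sketch of how such a regex-keyed map is usually resolved (the helper name and case handling are assumptions, not code from this diff):

import re

presets = {
    'default': 'NovelAI-Sphinx Moth',
    '.*pygmalion': 'NovelAI-Storywriter',
    '.*RWKV': 'Naive',
}

def preset_for(model_name: str) -> str:
    # First regex key that matches the model name wins; otherwise fall back to 'default'.
    for pattern, preset in presets.items():
        if pattern != 'default' and re.match(pattern.lower(), model_name.lower()):
            return preset
    return presets['default']

# preset_for('Pygmalion-6b')  -> 'NovelAI-Storywriter'
# preset_for('opt-1.3b')      -> 'NovelAI-Sphinx Moth'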

View File

@@ -1,6 +0,0 @@
do_sample=True
top_p=0.9
top_k=0
temperature=0.5
repetition_penalty=1.1
typical_p=1.0
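
The deleted Pygmalion.txt above shows the preset file format: one sampler parameter per line as key=value. A hedged sketch of parsing such a file into generation kwargs; the loader below is an assumption for illustration, not the project's actual code:

from ast import literal_eval
from pathlib import Path

def load_preset(path):
    params = {}
    for line in Path(path).read_text().splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        key, value = line.split('=', 1)
        # literal_eval turns "True", "0.9", "1.1" into real Python values.
        params[key.strip()] = literal_eval(value.strip())
    return params

# For the file above (before deletion):
# {'do_sample': True, 'top_p': 0.9, 'top_k': 0, 'temperature': 0.5, 'repetition_penalty': 1.1, 'typical_p': 1.0}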

View File

@@ -1,7 +1,7 @@
accelerate==0.18.0
bitsandbytes==0.37.2
flexgen==0.1.7
gradio==3.23.0
gradio==3.24.0
llamacpp==0.1.10
markdown
numpy

View File

@@ -166,7 +166,7 @@ def create_settings_menus(default_preset):
with gr.Column():
create_model_and_preset_menus()
with gr.Column():
shared.gradio['seed'] = gr.Number(value=-1, label='Seed (-1 for random)')
shared.gradio['seed'] = gr.Number(value=shared.settings['seed'], label='Seed (-1 for random)')
with gr.Row():
with gr.Column():
@@ -408,7 +408,7 @@ def create_interface():
with gr.Row():
with gr.Column(scale=4):
with gr.Tab('Raw'):
shared.gradio['textbox'] = gr.Textbox(value=default_text, elem_id="textbox", lines=25)
shared.gradio['textbox'] = gr.Textbox(value=default_text, elem_id="textbox", lines=27)
with gr.Tab('Markdown'):
shared.gradio['markdown'] = gr.Markdown()
with gr.Tab('HTML'):
@@ -442,7 +442,7 @@ def create_interface():
with gr.Tab("Text generation", elem_id="main"):
with gr.Row():
with gr.Column():
shared.gradio['textbox'] = gr.Textbox(value=default_text, lines=15, label='Input')
shared.gradio['textbox'] = gr.Textbox(value=default_text, lines=21, label='Input')
shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
shared.gradio['Generate'] = gr.Button('Generate')
with gr.Row():
@@ -455,7 +455,7 @@ def create_interface():
with gr.Column():
with gr.Tab('Raw'):
shared.gradio['output_textbox'] = gr.Textbox(lines=25, label='Output')
shared.gradio['output_textbox'] = gr.Textbox(lines=27, label='Output')
with gr.Tab('Markdown'):
shared.gradio['markdown'] = gr.Markdown()
with gr.Tab('HTML'):
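
The seed field above now reads its default from shared.settings['seed'] instead of a hard-coded -1, keeping the "-1 for random" convention. A hedged sketch of how that convention is commonly applied before generation; the function name and the use of torch here are assumptions, not code from this diff:

import random
import torch

def set_manual_seed(seed: int) -> int:
    # -1 means "pick a fresh random seed for this generation".
    if seed == -1:
        seed = random.randint(1, 2**31)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    return seed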

View File

@@ -2,6 +2,7 @@
"max_new_tokens": 200,
"max_new_tokens_min": 1,
"max_new_tokens_max": 2000,
"seed": -1,
"name1": "You",
"name2": "Assistant",
"context": "This is a conversation with your Assistant. The Assistant is very helpful and is eager to chat with you and answer your questions.",
@@ -18,7 +19,7 @@
],
"presets": {
"default": "NovelAI-Sphinx Moth",
".*pygmalion": "Pygmalion",
".*pygmalion": "NovelAI-Storywriter",
".*RWKV": "Naive"
},
"prompts": {