Add a button for rendering markdown for any model

oobabooga 2023-05-25 11:59:27 -03:00
parent 9a43656a50
commit 37d4ad012b
3 changed files with 10 additions and 9 deletions

README.md

@@ -15,7 +15,7 @@ Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.
 * Chat mode for conversation and role-playing
 * Instruct mode compatible with various formats, including Alpaca, Vicuna, Open Assistant, Dolly, Koala, ChatGLM, MOSS, RWKV-Raven, Galactica, StableLM, WizardLM, Baize, Ziya, Chinese-Vicuna, MPT, INCITE, Wizard Mega, KoAlpaca, Vigogne, Bactrian, h2o, and OpenBuddy
 * [Multimodal pipelines, including LLaVA and MiniGPT-4](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/multimodal)
-* Markdown output for [GALACTICA](https://github.com/paperswithcode/galai), including LaTeX rendering
+* Markdown output with LaTeX rendering, to use for instance with [GALACTICA](https://github.com/paperswithcode/galai)
 * Nice HTML output for GPT-4chan
 * [Custom chat characters](docs/Chat-mode.md)
 * Advanced chat features (send images, get audio responses with TTS)

modules/text_generation.py

@@ -130,14 +130,11 @@ def get_reply_from_output_ids(output_ids, input_ids, original_question, state, i
 def formatted_outputs(reply, model_name):
-    if shared.model_type == 'galactica':
-        reply = fix_galactica(reply)
-        return reply, reply, generate_basic_html(reply)
-    elif shared.model_type == 'gpt4chan':
+    if shared.model_type == 'gpt4chan':
         reply = fix_gpt4chan(reply)
-        return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply)
+        return reply, generate_4chan_html(reply)
     else:
-        return reply, 'Only applicable for GALACTICA models.', generate_basic_html(reply)
+        return reply, generate_basic_html(reply)
 
 def set_manual_seed(seed):
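Note: with the GALACTICA special case removed, formatted_outputs returns the same two-element (text, html) pair for every model type, and the Markdown pane is no longer filled during generation. A minimal standalone sketch of the new shape (the stubs below are hypothetical placeholders; the real helpers live elsewhere in the repo):

    def fix_gpt4chan(reply):
        # stub: the real helper cleans up GPT-4chan output
        return reply

    def generate_4chan_html(reply):
        # stub: the real helper builds the dedicated 4chan-style HTML view
        return '<div class="message_4chan">' + reply + '</div>'

    def generate_basic_html(reply):
        # stub: the real helper wraps the reply in generic readable HTML
        return '<div class="container">' + reply + '</div>'

    class shared:  # stub standing in for modules.shared
        model_type = 'llama'

    def formatted_outputs(reply, model_name):
        # Every branch now yields (raw_text, html); markdown is rendered
        # on demand by the new button instead of per-model here.
        if shared.model_type == 'gpt4chan':
            reply = fix_gpt4chan(reply)
            return reply, generate_4chan_html(reply)
        else:
            return reply, generate_basic_html(reply)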

server.py

@@ -709,6 +709,7 @@ def create_interface():
             shared.gradio['textbox'] = gr.Textbox(value=default_text, elem_classes="textbox", lines=27)
         with gr.Tab('Markdown'):
+            shared.gradio['markdown_render'] = gr.Button('Render')
             shared.gradio['markdown'] = gr.Markdown()
         with gr.Tab('HTML'):
@@ -769,6 +770,7 @@ def create_interface():
             shared.gradio['output_textbox'] = gr.Textbox(elem_classes="textbox_default_output", lines=27, label='Output')
         with gr.Tab('Markdown'):
+            shared.gradio['markdown_render'] = gr.Button('Render')
             shared.gradio['markdown'] = gr.Markdown()
         with gr.Tab('HTML'):
@@ -944,9 +946,9 @@ def create_interface():
         else:
             shared.input_params = [shared.gradio[k] for k in ['textbox', 'interface_state']]
 
         if shared.args.notebook:
-            output_params = [shared.gradio[k] for k in ['textbox', 'markdown', 'html']]
+            output_params = [shared.gradio[k] for k in ['textbox', 'html']]
         else:
-            output_params = [shared.gradio[k] for k in ['output_textbox', 'markdown', 'html']]
+            output_params = [shared.gradio[k] for k in ['output_textbox', 'html']]
 
         gen_events.append(shared.gradio['Generate'].click(
             lambda x: x, shared.gradio['textbox'], shared.gradio['last_input']).then(
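Note: dropping 'markdown' here keeps output_params in step with the two-element tuples that formatted_outputs now returns, since a Gradio event handler must yield exactly one value per output component. The Markdown pane is therefore no longer a streamed target; it is only updated by the new Render button below.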
@@ -966,6 +968,7 @@ def create_interface():
         if shared.args.notebook:
             shared.gradio['Undo'].click(lambda x: x, shared.gradio['last_input'], shared.gradio['textbox'], show_progress=False)
+            shared.gradio['markdown_render'].click(lambda x: x, shared.gradio['textbox'], shared.gradio['markdown'], queue=False)
             gen_events.append(shared.gradio['Regenerate'].click(
                 lambda x: x, shared.gradio['last_input'], shared.gradio['textbox'], show_progress=False).then(
                 ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
@@ -974,6 +977,7 @@ def create_interface():
                 # lambda: None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[0]; element.scrollTop = element.scrollHeight}")
             )
         else:
+            shared.gradio['markdown_render'].click(lambda x: x, shared.gradio['output_textbox'], shared.gradio['markdown'], queue=False)
             gen_events.append(shared.gradio['Continue'].click(
                 ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
                 generate_reply_wrapper, [shared.gradio['output_textbox']] + shared.input_params[1:], output_params, show_progress=False).then(
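Note: taken together, the wiring is just an identity pass-through: clicking Render sends the current textbox string into the gr.Markdown component, which renders it. This works for any model because nothing about the model or its output format is involved. A self-contained sketch of the pattern (component names are illustrative, not the webui's actual layout):

    import gradio as gr

    with gr.Blocks() as demo:
        with gr.Tab('Raw'):
            textbox = gr.Textbox(lines=10, label='Output')
        with gr.Tab('Markdown'):
            render = gr.Button('Render')
            markdown = gr.Markdown()

        # gr.Markdown renders whatever string it receives, so the handler
        # can be an identity function; queue=False lets the click bypass
        # the generation queue, as in the commit.
        render.click(lambda x: x, textbox, markdown, queue=False)

    if __name__ == '__main__':
        demo.launch()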