"""
An example extension. It does nothing, but you can add transformations
before the return statements to customize the webui behavior.

Starting from history_modifier and ending in output_modifier, the
functions are declared in the same order that they are called at
generation time.
"""

import gradio as gr
import torch
from transformers import LogitsProcessor

from modules import chat, shared
from modules.text_generation import (
    decode,
    encode,
    generate_reply,
)

params = {
    "display_name": "Example Extension",  # name shown in the extensions list
    "is_tab": False,  # set to True to render the extension UI in its own tab
}


class MyLogits(LogitsProcessor):
    """
    Manipulates the probabilities for the next token before it gets sampled.
    Used in the logits_processor_modifier function below.
    """
    def __init__(self):
        pass

    def __call__(self, input_ids, scores):
        # probs = torch.softmax(scores, dim=-1, dtype=torch.float)
        # probs[0] /= probs[0].sum()
        # scores = torch.log(probs / (1 - probs))
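        # Another sketch (commented out): suppress a single token by setting
        # its logit to minus infinity. The token id 12345 is hypothetical.
        # scores[0, 12345] = -float("inf")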
        return scores


def history_modifier(history):
    """
    Modifies the chat history.
    Only used in chat mode.
    """
    return history


def state_modifier(state):
    """
    Modifies the state variable, which is a dictionary containing the input
    values in the UI like sliders and checkboxes.
    """
    return state


def chat_input_modifier(text, visible_text, state):
    """
    Modifies the user input string in chat mode (visible_text).
    You can also modify the internal representation of the user
    input (text) to change how it will appear in the prompt.
    """
    return text, visible_text


def input_modifier(string, state, is_chat=False):
    """
    In default/notebook modes, modifies the whole prompt.

    In chat mode, it is the same as chat_input_modifier but only applied
    to "text", here called "string", and not to "visible_text".
    """
    return string


def bot_prefix_modifier(string, state):
    """
    Modifies the prefix for the next bot reply in chat mode.
    By default, the prefix will be something like "Bot Name:".
    """
    return string


def tokenizer_modifier(state, prompt, input_ids, input_embeds):
    """
    Modifies the input ids and embeds.
    Used by the multimodal extension to put image embeddings in the prompt.
    Only used by loaders that use the transformers library for sampling.
    """
    return prompt, input_ids, input_embeds


def logits_processor_modifier(processor_list, input_ids):
    """
    Adds logits processors to the list, allowing you to access and modify
    the next token probabilities.
    Only used by loaders that use the transformers library for sampling.
    """
    processor_list.append(MyLogits())
    return processor_list


def output_modifier(string, state, is_chat=False):
    """
    Modifies the LLM output before it gets presented.

    In chat mode, the modified version goes into history['visible'],
    and the original version goes into history['internal'].
    """
    return string


def custom_generate_chat_prompt(user_input, state, **kwargs):
    """
    Replaces the function that generates the prompt from the chat history.
    Only used in chat mode.
    """
    result = chat.generate_chat_prompt(user_input, state, **kwargs)
    return result


def custom_css():
    """
    Returns a CSS string that gets appended to the CSS for the webui.
    """
    return ''


def custom_js():
    """
    Returns a JavaScript string that gets appended to the JavaScript
    for the webui.
    """
    return ''


def setup():
    """
    Gets executed only once, when the extension is imported.
    """
    pass


def ui():
    """
    Gets executed when the UI is drawn. Custom gradio elements and
    their corresponding event handlers should be defined here.

    To learn about gradio components, check out the docs:
    https://gradio.app/docs/
    """
    pass