Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-10-01 01:26:03 -04:00
Expand superbooga to instruct mode and change the chat implementation
This commit is contained in:
parent c746a5bd00
commit 826c74c201
@@ -21,7 +21,7 @@ posthog.capture = lambda *args, **kwargs: None
 params = {
     'chunk_count': 5,
     'chunk_length': 700,
-    'strong_cleanup': True,
+    'strong_cleanup': False,
     'threads': 4,
 }
 
@@ -82,16 +82,18 @@ class SentenceTransformerEmbedder(Embedder):
 
 embedder = SentenceTransformerEmbedder()
 collector = ChromaCollector(embedder)
+chat_collector = ChromaCollector(embedder)
 chunk_count = 5
 
 
-def add_chunks_to_collector(chunks):
-    global collector
+def add_chunks_to_collector(chunks, collector):
     collector.clear()
     collector.add(chunks)
 
 
 def feed_data_into_collector(corpus, chunk_len):
+    global collector
+
     # Defining variables
     chunk_len = int(chunk_len)
     cumulative = ''
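Note: the hunks above and below rely on a small collector interface (`add`, `clear`, `get`, `get_ids`) that is defined in a separate module of the extension and is not part of this diff. The following is only a rough sketch of what such an `Embedder`/`ChromaCollector` pair could look like on top of the chromadb client API; the class internals, model choice, and collection name are assumptions for illustration, not the extension's actual implementation.

```python
# Minimal sketch only; the real classes live outside this diff.
import chromadb
from sentence_transformers import SentenceTransformer


class SentenceTransformerEmbedder:
    def __init__(self):
        # Model choice is an assumption made for this sketch.
        self.model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")

    def embed(self, texts):
        return self.model.encode(texts).tolist()


class ChromaCollector:
    def __init__(self, embedder):
        self.embedder = embedder
        self.client = chromadb.Client()
        self.collection = self.client.create_collection(name="context")

    def add(self, texts):
        # Store each chunk with a positional id so get_ids() can map back to it.
        self.collection.add(
            documents=texts,
            embeddings=self.embedder.embed(texts),
            ids=[f"id{i}" for i in range(len(texts))],
        )

    def get(self, query, n_results):
        # Return the n_results chunks closest to the query.
        result = self.collection.query(
            query_embeddings=self.embedder.embed([query]), n_results=n_results)
        return result["documents"][0]

    def get_ids(self, query, n_results):
        # Same lookup, but return integer chunk indices instead of the texts.
        result = self.collection.query(
            query_embeddings=self.embedder.embed([query]), n_results=n_results)
        return [int(i.replace("id", "")) for i in result["ids"][0]]

    def clear(self):
        self.client.delete_collection(name="context")
        self.collection = self.client.create_collection(name="context")
```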
@@ -102,7 +104,7 @@ def feed_data_into_collector(corpus, chunk_len):
     data_chunks = [corpus[i:i + chunk_len] for i in range(0, len(corpus), chunk_len)]
     cumulative += f"{len(data_chunks)} chunks have been found.\n\nAdding the chunks to the database...\n\n"
     yield cumulative
-    add_chunks_to_collector(data_chunks)
+    add_chunks_to_collector(data_chunks, collector)
     cumulative += "Done."
     yield cumulative
 
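The chunking here is purely character-based: the corpus is sliced into `chunk_len`-sized pieces with no overlap. A quick illustration with made-up numbers:

```python
# Illustration of the character-based slicing used above (corpus is made up).
corpus = "x" * 1600
chunk_len = 700
data_chunks = [corpus[i:i + chunk_len] for i in range(0, len(corpus), chunk_len)]
print([len(c) for c in data_chunks])  # [700, 700, 200] -> 3 chunks, the last one shorter
```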
@@ -136,9 +138,6 @@ def feed_url_into_collector(urls, chunk_len, strong_cleanup, threads):
             strings = [s for s in strings if re.search("[A-Za-z] ", s)]
 
         text = '\n'.join([s.strip() for s in strings])
-        lines = (line.strip() for line in text.splitlines())
-        chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
-        text = '\n\n'.join(chunk for chunk in chunks if chunk)
         all_text += text
 
     for i in feed_data_into_collector(all_text, chunk_len):
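The `strong_cleanup` filter (now off by default, per the first hunk) keeps only scraped strings that contain a letter followed by a space, which drops most single-token navigation fragments. A small example with invented strings:

```python
import re

# Invented scraped strings, purely to illustrate the strong_cleanup filter above.
strings = ["Home", "Login", "The quick brown fox jumps over the lazy dog.", "42", "Read more"]
kept = [s for s in strings if re.search("[A-Za-z] ", s)]
print(kept)  # ['The quick brown fox jumps over the lazy dog.', 'Read more']
```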
@@ -154,6 +153,52 @@ def apply_settings(_chunk_count):
     yield f"The following settings are now active: {str(settings_to_display)}"
 
 
+def custom_generate_chat_prompt(user_input, state, **kwargs):
+    global chat_collector
+
+    if state['mode'] == 'instruct':
+        results = collector.get(user_input, n_results=chunk_count)
+        additional_context = '\nConsider the excerpts below as additional context:\n\n' + '\n'.join(results)
+        user_input += additional_context
+    else:
+
+        def make_single_exchange(id_):
+            output = ''
+            output += f"{state['name1']}: {shared.history['internal'][id_][0]}\n"
+            output += f"{state['name2']}: {shared.history['internal'][id_][1]}\n"
+            return output
+
+        if len(shared.history['internal']) > chunk_count and user_input != '':
+            chunks = []
+            hist_size = len(shared.history['internal'])
+            for i in range(hist_size-1):
+                chunks.append(make_single_exchange(i))
+
+            add_chunks_to_collector(chunks, chat_collector)
+            query = '\n'.join(shared.history['internal'][-1] + [user_input])
+            try:
+                best_ids = chat_collector.get_ids(query, n_results=chunk_count)
+                additional_context = '\n'
+                for id_ in best_ids:
+                    if shared.history['internal'][id_][0] != '<|BEGIN-VISIBLE-CHAT|>':
+                        additional_context += make_single_exchange(id_)
+
+                logging.warning(f'Adding the following new context:\n{additional_context}')
+                state['context'] = state['context'].strip() + '\n' + additional_context
+                state['history'] = [shared.history['internal'][i] for i in range(hist_size) if i not in best_ids]
+            except RuntimeError:
+                logging.error("Couldn't query the database, moving on...")
+
+    return chat.generate_chat_prompt(user_input, state, **kwargs)
+
+
+def remove_special_tokens(string):
+    for k in ['<|begin-user-input|>', '<|end-user-input|>', '<|injection-point|>']:
+        string = string.replace(k, '')
+
+    return string.strip()
+
+
 def input_modifier(string):
     if shared.is_chat():
         return string
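In regular chat mode, each past exchange is flattened into a single "Name: message" chunk before being stored in `chat_collector`. A toy example of the chunk strings `make_single_exchange` produces, using invented history contents:

```python
# Invented state/history, purely to illustrate the chunk format used above.
state = {'name1': 'You', 'name2': 'Assistant'}
history_internal = [
    ['Where did I park the car?', 'You said it was on level 3 of the garage.'],
    ['What is my dog called?', 'Your dog is called Bongo.'],
]

def make_single_exchange(id_):
    output = ''
    output += f"{state['name1']}: {history_internal[id_][0]}\n"
    output += f"{state['name2']}: {history_internal[id_][1]}\n"
    return output

chunks = [make_single_exchange(i) for i in range(len(history_internal))]
print(chunks[0])
# You: Where did I park the car?
# Assistant: You said it was on level 3 of the garage.
```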
@@ -164,39 +209,18 @@ def input_modifier(string):
     if match:
         user_input = match.group(1).strip()
     else:
-        user_input = ''
+        return remove_special_tokens(string)
 
     # Get the most similar chunks
     results = collector.get(user_input, n_results=chunk_count)
 
     # Make the replacements
-    string = string.replace('<|begin-user-input|>', '')
-    string = string.replace('<|end-user-input|>', '')
+    string = string.replace('<|begin-user-input|>', '').replace('<|end-user-input|>', '')
     string = string.replace('<|injection-point|>', '\n'.join(results))
 
     return string
 
 
-def custom_generate_chat_prompt(user_input, state, **kwargs):
-    if len(shared.history['internal']) > 2 and user_input != '':
-        chunks = []
-        for i in range(len(shared.history['internal']) - 1):
-            chunks.append('\n'.join(shared.history['internal'][i]))
-
-        add_chunks_to_collector(chunks)
-        query = '\n'.join(shared.history['internal'][-1] + [user_input])
-        try:
-            best_ids = collector.get_ids(query, n_results=len(shared.history['internal']) - 1)
-
-            # Sort the history by relevance instead of by chronological order,
-            # except for the latest message
-            state['history'] = [shared.history['internal'][id_] for id_ in best_ids[::-1]] + [shared.history['internal'][-1]]
-        except RuntimeError:
-            logging.error("Couldn't query the database, moving on...")
-
-    return chat.generate_chat_prompt(user_input, state, **kwargs)
-
-
 def ui():
     with gr.Accordion("Click for more information...", open=False):
         gr.Markdown(textwrap.dedent("""
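In notebook/default modes, `input_modifier` extracts the question between the user-input tags and splices the retrieved chunks in at the injection point. A worked example of the string transformation, with invented retrieval results:

```python
# Invented prompt and retrieval results to show the replacements performed above.
string = (
    "Below is an excerpt from my notes:\n"
    "<|injection-point|>\n\n"
    "<|begin-user-input|>What did I decide about the budget?<|end-user-input|>"
)
results = ["We agreed to cap the Q3 budget at $10k.", "Marketing gets 30% of the budget."]

string = string.replace('<|begin-user-input|>', '').replace('<|end-user-input|>', '')
string = string.replace('<|injection-point|>', '\n'.join(results))
print(string)
# Below is an excerpt from my notes:
# We agreed to cap the Q3 budget at $10k.
# Marketing gets 30% of the budget.
#
# What did I decide about the budget?
```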
@@ -205,26 +229,40 @@ def ui():
 
         This extension takes a dataset as input, breaks it into chunks, and adds the result to a local/offline Chroma database.
 
-        The database is then queried during inference time to get the excerpts that are closest to your input. The idea is to create
-        an arbitrarily large pseudocontext.
+        The database is then queried during inference time to get the excerpts that are closest to your input. The idea is to create an arbitrarily large pseudo context.
 
-        It is a modified version of the superbig extension by kaiokendev: https://github.com/kaiokendev/superbig
+        The core methodology was developed and contributed by kaiokendev, who is working on improvements to the method in this repository: https://github.com/kaiokendev/superbig
 
+        ## Data input
+
+        Start by entering some data in the interface below and then clicking on "Load data".
+
+        Each time you load some new data, the old chunks are discarded.
+
+        ## Chat mode
+
+        #### Instruct
+
+        On each turn, the chunks will be compared to your current input and the most relevant matches will be appended to the input in the following format:
+
+        ```
+        Consider the excerpts below as additional context:
+        ...
+        ```
+
+        The injection doesn't make it into the chat history. It is only used in the current generation.
+
+        #### Regular chat
+
+        The chunks from the external data sources are ignored, and the chroma database is built based on the chat history instead. The most relevant past exchanges relative to the present input are added to the context string. This way, the extension acts as a long term memory.
+
         ## Notebook/default modes
 
-        ### How to use it
+        Your question must be manually specified between `<|begin-user-input|>` and `<|end-user-input|>` tags, and the injection point must be specified with `<|injection-point|>`.
 
-        1) Paste your input text (of whatever length) into the text box below.
-        2) Click on "Load data" to feed this text into the Chroma database.
-        3) In your prompt, enter your question between `<|begin-user-input|>` and `<|end-user-input|>`, and specify the injection point with `<|injection-point|>`.
+        The special tokens mentioned above (`<|begin-user-input|>`, `<|end-user-input|>`, and `<|injection-point|>`) are removed in the background before the text generation begins.
 
-        By default, the 5 closest chunks will be injected. You can customize this value in the "Generation settings" tab.
+        Here is an example in Vicuna 1.1 format:
 
-        The special tokens mentioned above (`<|begin-user-input|>`, `<|end-user-input|>`, and `<|injection-point|>`) are removed when the injection happens.
-
-        ### Example
-
-        For your convenience, you can use the following prompt as a starting point (for Vicuna 1.1 models):
-
         ```
         A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
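To make the instruct-mode behaviour described in the new docs concrete, here is roughly what the rewritten user input looks like once the retrieved chunks are appended in `custom_generate_chat_prompt`; the question and chunk contents are invented:

```python
# Invented example of the instruct-mode injection performed above.
user_input = "What does the warranty cover?"
results = ["The warranty covers manufacturing defects for 24 months.",
           "Accidental damage is not covered."]

additional_context = '\nConsider the excerpts below as additional context:\n\n' + '\n'.join(results)
user_input += additional_context
print(user_input)
# What does the warranty cover?
# Consider the excerpts below as additional context:
#
# The warranty covers manufacturing defects for 24 months.
# Accidental damage is not covered.
```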
@@ -242,51 +280,35 @@ def ui():
 
         ⚠️ For best results, make sure to remove the spaces and new line characters after `ASSISTANT:`.
 
-        ## Chat mode
-
-        In chat mode, the extension automatically sorts the history by relevance instead of chronologically, except for the very latest input/reply pair.
-
-        That is, the prompt will include (starting from the end):
-
-        * Your input
-        * The latest input/reply pair
-        * The #1 most relevant input/reply pair prior to the latest
-        * The #2 most relevant input/reply pair prior to the latest
-        * Etc
-
-        This way, the bot can have a long term history.
-
         *This extension is currently experimental and under development.*
 
         """))
 
-    if not shared.is_chat():
-        with gr.Row():
-            with gr.Column():
-                with gr.Tab("Text input"):
-                    data_input = gr.Textbox(lines=20, label='Input data')
-                    update_data = gr.Button('Load data')
+    with gr.Row():
+        with gr.Column(min_width=600):
+            with gr.Tab("Text input"):
+                data_input = gr.Textbox(lines=20, label='Input data')
+                update_data = gr.Button('Load data')
 
             with gr.Tab("URL input"):
                 url_input = gr.Textbox(lines=10, label='Input URLs', info='Enter one or more URLs separated by newline characters.')
                 strong_cleanup = gr.Checkbox(value=params['strong_cleanup'], label='Strong cleanup', info='Only keeps html elements that look like long-form text.')
                 threads = gr.Number(value=params['threads'], label='Threads', info='The number of threads to use while downloading the URLs.', precision=0)
                 update_url = gr.Button('Load data')
 
             with gr.Tab("File input"):
                 file_input = gr.File(label='Input file', type='binary')
                 update_file = gr.Button('Load data')
 
             with gr.Tab("Generation settings"):
                 chunk_count = gr.Number(value=params['chunk_count'], label='Chunk count', info='The number of closest-matching chunks to include in the prompt.')
                 update_settings = gr.Button('Apply changes')
 
                 chunk_len = gr.Number(value=params['chunk_length'], label='Chunk length', info='In characters, not tokens. This value is used when you click on "Load data".')
 
-            with gr.Column():
-                last_updated = gr.Markdown()
-
-        update_data.click(feed_data_into_collector, [data_input, chunk_len], last_updated, show_progress=False)
-        update_url.click(feed_url_into_collector, [url_input, chunk_len, strong_cleanup, threads], last_updated, show_progress=False)
-        update_file.click(feed_file_into_collector, [file_input, chunk_len], last_updated, show_progress=False)
-        update_settings.click(apply_settings, [chunk_count], last_updated, show_progress=False)
+        with gr.Column():
+            last_updated = gr.Markdown()
+
+    update_data.click(feed_data_into_collector, [data_input, chunk_len], last_updated, show_progress=False)
+    update_url.click(feed_url_into_collector, [url_input, chunk_len, strong_cleanup, threads], last_updated, show_progress=False)
+    update_file.click(feed_file_into_collector, [file_input, chunk_len], last_updated, show_progress=False)
+    update_settings.click(apply_settings, [chunk_count], last_updated, show_progress=False)
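The UI hunk wires each "Load data" button to a generator callback whose yielded strings stream into the `last_updated` Markdown widget. A self-contained sketch of the same wiring pattern, independent of the extension (the component names and the dummy callback are made up, and streaming output like this assumes a reasonably recent Gradio version):

```python
import gradio as gr


def load_data(text):
    # Generator callbacks let the status text update while work is in progress,
    # which is how feed_data_into_collector reports its progress in the diff.
    yield "Splitting the input into chunks..."
    chunks = [text[i:i + 700] for i in range(0, len(text), 700)]
    yield f"{len(chunks)} chunks found. Done."


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(min_width=600):
            data_input = gr.Textbox(lines=20, label='Input data')
            update_data = gr.Button('Load data')
        with gr.Column():
            last_updated = gr.Markdown()

    # Button click feeds the textbox into the callback and streams its output
    # into the Markdown component, mirroring the bindings added above.
    update_data.click(load_data, [data_input], last_updated, show_progress=False)

demo.launch()
```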