upgraded core functions

This commit is contained in:
Saifeddine ALOUI 2024-08-15 17:28:33 +02:00
parent f2ee6fb8e7
commit 2dbfca21b0
11 changed files with 513 additions and 50 deletions

2
app.py
View File

@ -64,7 +64,7 @@ def check_and_install_package(package: str, version: str):
ASCIIColors.red(f"Error checking/installing {package}: {str(e)}")
packages: List[Tuple[str, str]] = [
("lollmsvectordb", "0.7.9"),
("lollmsvectordb", "0.8.3"),
("freedom_search", "0.1.7"),
("scrapemaster", "0.1.6"),
("lollms_client", "0.6.2")

View File

@ -92,7 +92,7 @@ async def execute_personality_command(request: CmdExecutionRequest):
lollmsElfServer.prepare_reception(client_id)
if lollmsElfServer.personality.processor is not None:
lollmsElfServer.start_time = datetime.now()
lollmsElfServer.personality.processor.callback = partial(lollmsElfServer.process_chunk, client_id=client_id)
lollmsElfServer.personality.processor.callback = partial(lollmsElfServer.process_data, client_id=client_id)
lollmsElfServer.personality.processor.execute_command(command, parameters)
else:
lollmsElfServer.warning("Non scripted personalities do not support commands",client_id=client_id)
@ -143,7 +143,7 @@ async def add_webpage(request: AddWebPageRequest):
raise HTTPException(status_code=400, detail=f"Exception : {e}")
try:
client.discussion.add_file(file_path, client, partial(lollmsElfServer.process_chunk, client_id = request.client_id))
client.discussion.add_file(file_path, client, partial(lollmsElfServer.process_data, client_id = request.client_id))
# File saved successfully
lollmsElfServer.HideBlockingMessage()
lollmsElfServer.refresh_files()

View File

@ -0,0 +1,28 @@
# LollmsClient Quick Start
1. Initialize:
```javascript
const client = new LollmsClient('http://localhost:9600', <(optional) model name>);
```
2. Generate Text:
```javascript
const response = await client.generateText("Write a short story.");
console.log(response);
```
3. Tokenize/Detokenize:
```javascript
const tokens = await client.tokenize("Hello, world!");
// the tokens are a list of lists: each entry's first element is the token text and the second is the token id
// Extract only the token IDs from the tokenized result
const tokenIds = tokens.map(token => token[1]);
// Use the token IDs for detokenization
const text = await client.detokenize(tokenIds);
```
4. List Resources:
```javascript
const personalities = await client.listMountedPersonalities();
const models = await client.listModels();
```

View File

@ -0,0 +1,206 @@
# lollms_client.js
This JavaScript library provides a client-side interface for interacting with the LoLLMs server. It allows you to:
- Generate text using various models.
- Tokenize and detokenize text.
- Manage personalities and models.
- Perform tasks like translation and summarization.
- Update code based on user input.
- Interact with a RAG (Retrieval Augmented Generation) database.
## Installation
You can include the `lollms_client.js` file in your HTML project by adding the following script tag:
```html
<script src="/lollms_assets/js/lollms_client.js"></script>
```
## Usage
### 1. Initialization
Create a new instance of the `LollmsClient` class, providing the necessary parameters:
```javascript
const lollmsClient = new LollmsClient(
'http://localhost:9600', // Host address of the LoLLMs server
'gpt-3.5-turbo', // Name of the model to use
4096, // Context size
-1, // Personality (optional)
4096, // Number of tokens to predict (optional)
0.1, // Temperature (optional)
50, // Top-k (optional)
0.95, // Top-p (optional)
0.8, // Repeat penalty (optional)
40, // Repeat last n (optional)
null, // Seed (optional)
8, // Number of threads (optional)
'', // Service key (optional)
ELF_GENERATION_FORMAT.LOLLMS // Default generation mode (optional)
);
```
### 2. Generating Text
Use the `generateText` method to generate text from the LoLLMs server:
```javascript
const prompt = "Write a short story about a cat who goes on an adventure.";
const response = await lollmsClient.generateText(prompt);
console.log(response);
```
### 3. Tokenization and Detokenization
Use the `tokenize` and `detokenize` methods to convert text to and from tokens:
```javascript
const tokens = await lollmsClient.tokenize("Hello, world!");
console.log(tokens);
const text = await lollmsClient.detokenize(tokens);
console.log(text);
```
### 4. Managing Personalities and Models
Use the `listMountedPersonalities` and `listModels` methods to retrieve information about available personalities and models:
```javascript
const personalities = await lollmsClient.listMountedPersonalities();
console.log(personalities);
const models = await lollmsClient.listModels();
console.log(models);
```
### 5. Performing Tasks
The `TasksLibrary` class provides methods for performing common tasks:
```javascript
const tasksLibrary = new TasksLibrary(lollmsClient);
const translatedText = await tasksLibrary.translateTextChunk("Hello, world!", "french");
console.log(translatedText);
const summary = await tasksLibrary.summarizeText("This is a long text that needs to be summarized.", "short");
console.log(summary);
```
### 6. Updating Code
The `updateCode` method allows you to update code based on user input:
```javascript
const originalCode = "const message = 'Hello, world!';";
const queryString = "# FULL_REWRITE\nconst message = 'Goodbye, world!';";
const updatedCode = lollmsClient.updateCode(originalCode, queryString).updatedCode;
console.log(updatedCode);
```
### 7. RAG (Retrieval Augmented Generation)
The `LOLLMSRAGClient` class provides methods for interacting with a RAG database:
```javascript
const ragClient = new LOLLMSRAGClient('http://localhost:9600', 'your_bearer_token');
await ragClient.addDocument('My Title', 'This is the content of the document.');
const searchResults = await ragClient.search('What is the content of the document?');
console.log(searchResults);
```
## API Reference
### LollmsClient
- **constructor(host_address, model_name, ctx_size, personality, n_predict, temperature, top_k, top_p, repeat_penalty, repeat_last_n, seed, n_threads, service_key, default_generation_mode)**: Initializes a new LollmsClient instance.
- **generateText(prompt, options)**: Generates text from the LoLLMs server.
- **tokenize(prompt)**: Tokenizes the given prompt.
- **detokenize(tokensList)**: Detokenizes the given list of tokens.
- **generate(prompt, options)**: Generates text using the specified generation mode.
- **generate_with_images(prompt, images, options)**: Generates text with images.
- **lollms_generate(prompt, host_address, model_name, personality, n_predict, stream, temperature, top_k, top_p, repeat_penalty, repeat_last_n, seed, n_threads, service_key, streamingCallback)**: Generates text using the LoLLMs generation mode.
- **lollms_generate_with_images(prompt, images, host_address, model_name, personality, n_predict, stream, temperature, top_k, top_p, repeat_penalty, repeat_last_n, seed, n_threads, service_key, streamingCallback)**: Generates text with images using the LoLLMs generation mode.
- **openai_generate(prompt, host_address, model_name, personality, n_predict, stream, temperature, top_k, top_p, repeat_penalty, repeat_last_n, seed, n_threads, ELF_COMPLETION_FORMAT, service_key, streamingCallback)**: Generates text using the OpenAI generation mode.
- **listMountedPersonalities(host_address)**: Lists mounted personalities.
- **listModels(host_address)**: Lists available models.
### TasksLibrary
- **constructor(lollms)**: Initializes a new TasksLibrary instance.
- **translateTextChunk(textChunk, outputLanguage, host_address, model_name, temperature, maxGenerationSize)**: Translates a text chunk to the specified language.
- **summarizeText(textChunk, summaryLength, host_address, model_name, temperature, maxGenerationSize)**: Summarizes a text chunk.
- **yesNo(question, context, maxAnswerLength, conditioning)**: Determines if a question is asking for a yes/no answer.
- **multichoiceQuestion(question, possibleAnswers, context, maxAnswerLength, conditioning)**: Interprets a multi-choice question.
- **buildPrompt(promptParts, sacrificeId, contextSize, minimumSpareContextSize)**: Builds a prompt for code generation.
- **extractCodeBlocks(text)**: Extracts code blocks from a text.
- **updateCode(originalCode, queryString)**: Updates code based on a query string.
### LOLLMSRAGClient
- **constructor(baseURL, apiKey)**: Initializes a new LOLLMSRAGClient instance.
- **request(endpoint, method, body)**: Makes a request to the RAG server.
- **addDocument(title, content, path)**: Adds a document to the RAG database.
- **removeDocument(documentId)**: Removes a document from the RAG database.
- **indexDatabase()**: Indexes the RAG database.
- **search(query)**: Searches the RAG database for documents matching the query.
- **wipeDatabase()**: Wipes the RAG database.
## Examples
### Generating Text
```javascript
const lollmsClient = new LollmsClient('http://localhost:9600', 'gpt-3.5-turbo');
const prompt = "Write a short story about a cat who goes on an adventure.";
const response = await lollmsClient.generateText(prompt);
console.log(response);
```
### Translating Text
```javascript
const tasksLibrary = new TasksLibrary(lollmsClient);
const translatedText = await tasksLibrary.translateTextChunk("Hello, world!", "french");
console.log(translatedText);
```
### Updating Code
```javascript
const originalCode = "const message = 'Hello, world!';";
const queryString = "# FULL_REWRITE\nconst message = 'Goodbye, world!';";
const updatedCode = lollmsClient.updateCode(originalCode, queryString).updatedCode;
console.log(updatedCode);
```
### RAG Search
```javascript
const ragClient = new LOLLMSRAGClient('http://localhost:9600', 'your_bearer_token');
await ragClient.addDocument('My Title', 'This is the content of the document.');
const searchResults = await ragClient.search('What is the content of the document?');
console.log(searchResults);
```
## Contributing
Contributions are welcome! Please open an issue or submit a pull request.
## License
This project is licensed under the Apache 2.0 License.
This tool was built by ParisNeo. The bearer key is optional and is only required when API-key authentication is enabled on the server side.

View File

@ -0,0 +1,40 @@
# WebAppLocalizer
Quick reference for AI-assisted development of the WebAppLocalizer class.
## Import
```javascript
// Served by LoLLMs system
import WebAppLocalizer from '/lollms_assets/js/web.app.localizer';
```
## Initialization
```javascript
const localizer = new WebAppLocalizer(translations, localStoragePrefix, languageSelector);
```
## Key Methods
- `setLanguage(lang)`
- `getCurrentLanguage()`
- `getAvailableLanguages()`
- `translate(key)`
- `apply()`
## Translations Object Structure
```javascript
{
[langCode]: {
name: "Language Name",
translations: {
[key]: "Translation"
}
}
}
```
## HTML Usage
```html
<element data-translate="key"></element>
```
Apply translations: `localizer.apply();`

View File

@ -0,0 +1,96 @@
# WebAppLocalizer
## Overview
WebAppLocalizer is a JavaScript class that simplifies the process of localizing web applications. It manages translations, persists language preferences, and provides an optional UI for language selection.
## Installation
Include the WebAppLocalizer script in your HTML file:
```html
<script src="/lollms_assets/js/web.app.localizer"></script>
```
## Usage
### Initialization
Create a new instance of WebAppLocalizer:
```javascript
const localizer = new WebAppLocalizer(translations, localStoragePrefix, languageSelector);
```
Parameters:
- `translations` (required): An object containing translations for different languages.
- `localStoragePrefix` (optional): A string prefix for localStorage keys. Default: 'webAppLocalizer_'.
- `languageSelector` (optional): A DOM element or its ID for the language selector UI.
### Translations Object Structure
```javascript
const translations = {
en: {
name: "English",
translations: {
"key1": "Translation 1",
"key2": "Translation 2"
}
},
fr: {
name: "Français",
translations: {
"key1": "Traduction 1",
"key2": "Traduction 2"
}
}
};
```
### Methods
#### setLanguage(lang)
Set the current language.
#### getCurrentLanguage()
Get the current language code.
#### getAvailableLanguages()
Get an array of available languages with their codes and names.
#### translate(key)
Get the translation for a specific key.
#### apply()
Apply translations to all elements with the `data-translate` attribute.
### HTML Usage
Add the `data-translate` attribute to elements you want to localize:
```html
<h1 data-translate="welcome-message"></h1>
<p data-translate="about-us"></p>
```
## Example
```javascript
const translations = {
en: {
name: "English",
translations: {
"welcome-message": "Welcome to our website!",
"about-us": "About Us"
}
},
fr: {
name: "Français",
translations: {
"welcome-message": "Bienvenue sur notre site web!",
"about-us": "À propos de nous"
}
}
};
const localizer = new WebAppLocalizer(translations, 'myApp_', document.getElementById('language-selector'));
localizer.apply();
```
This will initialize the localizer, set up a language selector (if provided), and apply translations to your HTML elements.

View File

@ -0,0 +1,90 @@
class WebAppLocalizer {
    /**
     * Lightweight localization helper for web apps.
     *
     * Holds a translations table, persists the selected language in
     * localStorage, applies translations to elements carrying a
     * `data-translate` attribute, and can optionally wire up a
     * language-selector <select> element.
     *
     * @param {Object} translations - Map of language code -> { name: string, translations: { [key]: string } }.
     * @param {string} [localStoragePrefix='webAppLocalizer_'] - Prefix for the localStorage persistence key.
     * @param {HTMLElement|null} [languageSelector=null] - Optional element to host the language selector UI.
     */
    constructor(translations, localStoragePrefix = 'webAppLocalizer_', languageSelector = null) {
        this.translations = translations;
        this.localStoragePrefix = localStoragePrefix;
        // Adopt the persisted language only if it still exists in the
        // supplied translations table. A stale stored code (e.g. a language
        // removed between releases) would otherwise make translate() throw
        // when dereferencing this.translations[this.currentLang].
        const storedLang = this.loadCurrentLanguage();
        this.currentLang = (storedLang && this.translations.hasOwnProperty(storedLang))
            ? storedLang
            : Object.keys(translations)[0];
        this.languageSelector = languageSelector;
        if (this.languageSelector) {
            this.initializeLanguageSelector();
        }
    }
    /**
     * Read the persisted language code, or null if none was stored.
     * @returns {string|null}
     */
    loadCurrentLanguage() {
        return localStorage.getItem(this.localStoragePrefix + 'currentLang');
    }
    /**
     * Persist the given language code.
     * @param {string} lang
     */
    saveCurrentLanguage(lang) {
        localStorage.setItem(this.localStoragePrefix + 'currentLang', lang);
    }
    /**
     * Switch the active language, persist it, re-apply translations and
     * sync the selector UI. Unknown codes are rejected with a warning.
     * @param {string} lang
     */
    setLanguage(lang) {
        if (this.translations.hasOwnProperty(lang)) {
            this.currentLang = lang;
            this.saveCurrentLanguage(lang);
            this.apply();
            if (this.languageSelector) {
                this.languageSelector.value = lang;
            }
        } else {
            console.warn(`Language '${lang}' not found in translations.`);
        }
    }
    /**
     * @returns {string} The active language code.
     */
    getCurrentLanguage() {
        return this.currentLang;
    }
    /**
     * @returns {Array<{code: string, name: string}>} Available languages.
     */
    getAvailableLanguages() {
        return Object.keys(this.translations).map(lang => ({
            code: lang,
            name: this.translations[lang].name
        }));
    }
    /**
     * Look up a translation key in the active language.
     * Falls back to the key itself when no translation exists, so untranslated
     * keys remain visible rather than rendering blank.
     * @param {string} key
     * @returns {string}
     */
    translate(key) {
        const translations = this.translations[this.currentLang].translations;
        return translations[key] || key;
    }
    /**
     * Apply translations to every element with a `data-translate` attribute.
     */
    apply() {
        const elements = document.querySelectorAll('[data-translate]');
        elements.forEach(element => {
            const key = element.getAttribute('data-translate');
            element.textContent = this.translate(key);
        });
    }
    /**
     * Build (or reuse) a <select> inside the provided selector element,
     * populate it with the available languages, and bind the change handler.
     */
    initializeLanguageSelector() {
        if (!(this.languageSelector instanceof HTMLElement)) {
            console.warn('Language selector is not a valid HTML element.');
            return;
        }
        if (this.languageSelector.tagName.toLowerCase() !== 'select') {
            // Create a select element if the provided element is not a select
            const selectElement = document.createElement('select');
            this.languageSelector.appendChild(selectElement);
            this.languageSelector = selectElement;
        }
        // Clear existing options
        this.languageSelector.innerHTML = '';
        // Add options for each available language
        this.getAvailableLanguages().forEach(lang => {
            const option = document.createElement('option');
            option.value = lang.code;
            option.textContent = lang.name;
            this.languageSelector.appendChild(option);
        });
        // Set the current language
        this.languageSelector.value = this.currentLang;
        // Add event listener for language change
        this.languageSelector.addEventListener('change', (event) => {
            this.setLanguage(event.target.value);
        });
    }
}

View File

@ -73,11 +73,11 @@ def add_events(sio:socketio):
scrape_and_save(url=url, file_path=file_path)
try:
if not lollmsElfServer.personality.processor is None:
lollmsElfServer.personality.processor.add_file(file_path, client, partial(lollmsElfServer.process_chunk, client_id = sid))
lollmsElfServer.personality.processor.add_file(file_path, client, partial(lollmsElfServer.process_data, client_id = sid))
# File saved successfully
run_async(partial(sio.emit,'web_page_added', {'status':True,}))
else:
lollmsElfServer.personality.add_file(file_path, client, partial(lollmsElfServer.process_chunk, client_id = sid))
lollmsElfServer.personality.add_file(file_path, client, partial(lollmsElfServer.process_data, client_id = sid))
# File saved successfully
run_async(partial(sio.emit,'web_page_added', {'status':True}))
lollmsElfServer.HideBlockingMessage()
@ -114,15 +114,15 @@ def add_events(sio:socketio):
cv2.imwrite(str(save_path), frame)
if not lollmsElfServer.personality.processor is None:
lollmsElfServer.info("Sending file to scripted persona")
client.discussion.add_file(save_path, client, lollmsElfServer.tasks_library, partial(lollmsElfServer.process_chunk, client_id = sid))
# lollmsElfServer.personality.processor.add_file(save_path, client, partial(lollmsElfServer.process_chunk, client_id = sid))
client.discussion.add_file(save_path, client, lollmsElfServer.tasks_library, partial(lollmsElfServer.process_data, client_id = sid))
# lollmsElfServer.personality.processor.add_file(save_path, client, partial(lollmsElfServer.process_data, client_id = sid))
# File saved successfully
run_async(partial(sio.emit,'picture_taken', {'status':True, 'progress': 100}))
lollmsElfServer.info("File sent to scripted persona")
else:
lollmsElfServer.info("Sending file to persona")
client.discussion.add_file(save_path, client, lollmsElfServer.tasks_library, partial(lollmsElfServer.process_chunk, client_id = sid))
#lollmsElfServer.personality.add_file(save_path, client, partial(lollmsElfServer.process_chunk, client_id = sid))
client.discussion.add_file(save_path, client, lollmsElfServer.tasks_library, partial(lollmsElfServer.process_data, client_id = sid))
#lollmsElfServer.personality.add_file(save_path, client, partial(lollmsElfServer.process_data, client_id = sid))
# File saved successfully
run_async(partial(sio.emit,'picture_taken', {'status':True, 'progress': 100}))
lollmsElfServer.info("File sent to persona")

@ -1 +1 @@
Subproject commit 581a2157c4b96232b0fd181048f8dd7c1d1f2143
Subproject commit df4052e096a2b9aac2375b01e620a14a32d4d879

View File

@ -705,7 +705,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
callback (callable, optional): A callable with this signature (str, MSG_TYPE) to send the text to. Defaults to None.
"""
if not callback:
callback = partial(self.process_chunk,client_id = client_id)
callback = partial(self.process_data,client_id = client_id)
if callback:
callback(full_text, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT)
@ -1017,50 +1017,52 @@ class LOLLMSWebUI(LOLLMSElfServer):
)
)
def process_chunk(
def process_data(
self,
chunk:str|None,
data:str|list|None,
operation_type:MSG_OPERATION_TYPE,
metadata:list=None,
client_id:int=0,
personality:AIPersonality=None
):
"""
Processes a chunk of generated text
Processes a chunk of generated data
"""
client = self.session.get_client(client_id)
if chunk is None and operation_type in [MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_ADD_CHUNK]:
if data is None and operation_type in [MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_ADD_CHUNK]:
return
if chunk is not None:
if data is not None:
if not client_id in list(self.session.clients.keys()):
self.error("Connection lost", client_id=client_id)
return
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_STEP:
ASCIIColors.info("--> Step:"+chunk)
ASCIIColors.info("--> Step:"+data)
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_STEP_START:
ASCIIColors.info("--> Step started:"+chunk)
self.update_message_step(client_id, chunk, operation_type)
ASCIIColors.info("--> Step started:"+data)
self.update_message_step(client_id, data, operation_type)
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_STEP_END_SUCCESS:
ASCIIColors.success("--> Step ended:"+chunk)
self.update_message_step(client_id, chunk, operation_type)
ASCIIColors.success("--> Step ended:"+data)
self.update_message_step(client_id, data, operation_type)
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_STEP_END_FAILURE:
ASCIIColors.success("--> Step ended:"+chunk)
self.update_message_step(client_id, chunk, operation_type)
ASCIIColors.success("--> Step ended:"+data)
self.update_message_step(client_id, data, operation_type)
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_WARNING:
self.warning(chunk,client_id=client_id)
ASCIIColors.error("--> Exception from personality:"+chunk)
self.warning(data,client_id=client_id)
ASCIIColors.error("--> Exception from personality:"+data)
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_EXCEPTION:
self.error(chunk, client_id=client_id)
ASCIIColors.error("--> Exception from personality:"+chunk)
self.error(data, client_id=client_id)
ASCIIColors.error("--> Exception from personality:"+data)
return
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_INFO:
self.info(chunk, client_id=client_id)
ASCIIColors.info("--> Info:"+chunk)
self.info(data, client_id=client_id)
ASCIIColors.info("--> Info:"+data)
return
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_UI:
self.update_message_ui(client_id, chunk)
self.update_message_ui(client_id, data)
return
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_JSON_INFOS:
self.update_message_metadata(client_id, data)
return
if operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_NEW_MESSAGE:
self.nb_received_tokens = 0
self.start_time = datetime.now()
@ -1069,13 +1071,13 @@ class LOLLMSWebUI(LOLLMSElfServer):
self.new_message(
client_id,
self.personality.name if personality is None else personality.name,
chunk,
data,
message_type = MSG_TYPE.MSG_TYPE_CONTENT
)
return
elif operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_FINISHED_MESSAGE:
self.close_message(client_id)
return
elif operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_ADD_CHUNK:
if self.nb_received_tokens==0:
self.start_time = datetime.now()
@ -1090,12 +1092,12 @@ class LOLLMSWebUI(LOLLMSElfServer):
dt=1
spd = self.nb_received_tokens/dt
if self.config.debug_show_chunks:
print(chunk,end="",flush=True)
print(data,end="",flush=True)
#ASCIIColors.green(f"Received {self.nb_received_tokens} tokens (speed: {spd:.2f}t/s) ",end="\r",flush=True)
sys.stdout = sys.__stdout__
sys.stdout.flush()
if chunk:
client.generated_text += chunk
if data:
client.generated_text += data
antiprompt = self.personality.detect_antiprompt(client.generated_text)
if antiprompt:
ASCIIColors.warning(f"\n{antiprompt} detected. Stopping generation")
@ -1107,7 +1109,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
if client.continuing and client.first_chunk:
self.update_message_content(client_id, client.generated_text, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT)
else:
self.update_message_content(client_id, chunk, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_ADD_CHUNK)
self.update_message_content(client_id, data, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_ADD_CHUNK)
client.first_chunk=False
# if stop generation is detected then stop
@ -1117,7 +1119,6 @@ class LOLLMSWebUI(LOLLMSElfServer):
self.cancel_gen = False
ASCIIColors.warning("Generation canceled")
return False
# Stream the generated text to the main process
elif operation_type in [MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT_INVISIBLE_TO_AI, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT_INVISIBLE_TO_USER]:
if self.nb_received_tokens==0:
@ -1129,7 +1130,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
except Exception as ex:
ASCIIColors.warning("Couldn't send status update to client")
client.generated_text = chunk
client.generated_text = data
antiprompt = self.personality.detect_antiprompt(client.generated_text)
if antiprompt:
ASCIIColors.warning(f"\n{antiprompt} detected. Stopping generation")
@ -1137,11 +1138,11 @@ class LOLLMSWebUI(LOLLMSElfServer):
self.update_message_content(client_id, client.generated_text, operation_type)
return False
self.update_message_content(client_id, chunk, operation_type)
self.update_message_content(client_id, data, operation_type)
return True
# Stream the generated text to the frontend
else:
self.update_message_content(client_id, chunk, operation_type)
self.update_message_content(client_id, data, operation_type)
return True
@ -1224,6 +1225,8 @@ class LOLLMSWebUI(LOLLMSElfServer):
def _generate(self, prompt, n_predict, client_id, callback=None):
client = self.session.get_client(client_id)
if client is None:
return None
self.nb_received_tokens = 0
self.start_time = datetime.now()
if self.model is not None:
@ -1271,7 +1274,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
try:
post_processed_output = process_ai_output(output, client.discussion.image_files, client.discussion.discussion_folder)
if len(post_processed_output)!=output:
self.process_chunk(post_processed_output, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT,client_id=client_id)
self.process_data(post_processed_output, MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_SET_CONTENT,client_id=client_id)
except Exception as ex:
ASCIIColors.error(str(ex))
else:
@ -1342,11 +1345,11 @@ class LOLLMSWebUI(LOLLMSElfServer):
context_details=context_details,
n_predict = min(self.config.ctx_size-len(tokens)-1,self.config.max_n_predict),
client_id=client_id,
callback=partial(self.process_chunk,client_id = client_id)
callback=partial(self.process_data,client_id = client_id)
)
if self.tts and self.config.auto_read and len(self.personality.audio_samples)>0:
try:
self.process_chunk("Generating voice output",MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_STEP_START,client_id=client_id)
self.process_data("Generating voice output",MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_STEP_START,client_id=client_id)
from lollms.services.xtts.lollms_xtts import LollmsXTTS
voice=self.config.xtts_current_voice
if voice!="main_voice":
@ -1367,8 +1370,8 @@ class LOLLMSWebUI(LOLLMSElfServer):
f' Your browser does not support the audio element.',
f'</audio>'
])
self.process_chunk("Generating voice output", operation_type= MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_STEP_END_SUCCESS,client_id=client_id)
self.process_chunk(fl,MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_UI, client_id=client_id)
self.process_data("Generating voice output", operation_type= MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_STEP_END_SUCCESS,client_id=client_id)
self.process_data(fl,MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_UI, client_id=client_id)
else:
self.InfoMessage("xtts is not up yet.\nPlease wait for it to load then try again. This may take some time.")
@ -1555,6 +1558,6 @@ class LOLLMSWebUI(LOLLMSElfServer):
client.generated_text = ""
ASCIIColors.info(f"prompt has {self.config.ctx_size-context_details['available_space']} tokens")
ASCIIColors.info(f"warmup for generating up to {min(context_details['available_space'],self.config.max_n_predict)} tokens")
self.generate(discussion_messages, current_message, context_details, min(self.config.ctx_size-len(tokens)-1, self.config.max_n_predict), client.client_id, callback if callback else partial(self.process_chunk, client_id=client.client_id))
self.generate(discussion_messages, current_message, context_details, min(self.config.ctx_size-len(tokens)-1, self.config.max_n_predict), client.client_id, callback if callback else partial(self.process_data, client_id=client.client_id))
self.close_message(client.client_id)
return client.generated_text

@ -1 +1 @@
Subproject commit a80d7d87da902c6bd9d09ec5f86e1516b1815b4c
Subproject commit c3f64f7029d3bba82971259c248903c51eb490b1